index | repo_id | file_path | content | __index_level_0__ |
---|---|---|---|---|
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/from_TSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_tsv_to_apache_parquet(
data_path: InputPath('TSV'),
output_data_path: OutputPath('ApacheParquet'),
):
'''Converts TSV table to Apache Parquet.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import csv, parquet
table = csv.read_csv(data_path, parse_options=csv.ParseOptions(delimiter='\t'))
parquet.write_table(table, output_data_path)
if __name__ == '__main__':
create_component_from_func(
convert_tsv_to_apache_parquet,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['pyarrow==0.17.1'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/from_TSV/component.yaml",
},
)
| 600 |
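A quick way to sanity-check the conversion logic above outside of Kubeflow is to run the same pyarrow calls on a tiny file. This is a minimal local sketch, assuming only that pyarrow is installed; the temporary file names and data are illustrative.

```python
# Minimal local check of the TSV -> Parquet conversion used by the component above
# (assumes pyarrow is installed; no Kubeflow runtime involved).
import os
import tempfile
from pyarrow import csv, parquet

with tempfile.TemporaryDirectory() as tmp:
    tsv_path = os.path.join(tmp, 'data.tsv')
    parquet_path = os.path.join(tmp, 'data.parquet')
    with open(tsv_path, 'w') as f:
        f.write('a\tb\n1\t2\n3\t4\n')

    # Same calls as convert_tsv_to_apache_parquet.
    table = csv.read_csv(tsv_path, parse_options=csv.ParseOptions(delimiter='\t'))
    parquet.write_table(table, parquet_path)

    print(parquet.read_table(parquet_path).to_pydict())  # {'a': [1, 3], 'b': [2, 4]}
```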
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/from_TSV/component.yaml | name: Convert tsv to apache parquet
description: |-
Converts TSV table to Apache Parquet.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: TSV}
outputs:
- {name: output_data, type: ApacheParquet}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/from_TSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'pyarrow==0.17.1' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install
--quiet --no-warn-script-location 'pyarrow==0.17.1' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_tsv_to_apache_parquet(
data_path,
output_data_path,
):
'''Converts TSV table to Apache Parquet.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import csv, parquet
table = csv.read_csv(data_path, parse_options=csv.ParseOptions(delimiter='\t'))
parquet.write_table(table, output_data_path)
import argparse
_parser = argparse.ArgumentParser(prog='Convert tsv to apache parquet', description='Converts TSV table to Apache Parquet.\n\n [Apache Parquet](https://parquet.apache.org/)\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-data", dest="output_data_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = convert_tsv_to_apache_parquet(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --data
- {inputPath: data}
- --output-data
- {outputPath: output_data}
| 601 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/to_TSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_apache_parquet_to_tsv(
data_path: InputPath('ApacheParquet'),
output_data_path: OutputPath('TSV'),
):
'''Converts Apache Parquet to TSV.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import parquet
data_frame = parquet.read_pandas(data_path).to_pandas()
data_frame.to_csv(
output_data_path,
index=False,
sep='\t',
)
if __name__ == '__main__':
convert_apache_parquet_to_tsv_op = create_component_from_func(
convert_apache_parquet_to_tsv,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['pyarrow==0.17.1', 'pandas==1.0.3'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/to_TSV/component.yaml",
},
)
| 602 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/to_TSV/component.yaml | name: Convert apache parquet to tsv
description: |-
Converts Apache Parquet to TSV.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: ApacheParquet}
outputs:
- {name: output_data, type: TSV}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/to_TSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'pyarrow==0.17.1' 'pandas==1.0.3' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3
-m pip install --quiet --no-warn-script-location 'pyarrow==0.17.1' 'pandas==1.0.3'
--user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_apache_parquet_to_tsv(
data_path,
output_data_path,
):
'''Converts Apache Parquet to TSV.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import parquet
data_frame = parquet.read_pandas(data_path).to_pandas()
data_frame.to_csv(
output_data_path,
index=False,
sep='\t',
)
import argparse
_parser = argparse.ArgumentParser(prog='Convert apache parquet to tsv', description='Converts Apache Parquet to TSV.\n\n [Apache Parquet](https://parquet.apache.org/)\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-data", dest="output_data_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = convert_apache_parquet_to_tsv(**_parsed_args)
args:
- --data
- {inputPath: data}
- --output-data
- {outputPath: output_data}
| 603 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/from_CSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_csv_to_apache_parquet(
data_path: InputPath('CSV'),
output_data_path: OutputPath('ApacheParquet'),
):
'''Converts CSV table to Apache Parquet.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import csv, parquet
table = csv.read_csv(data_path)
parquet.write_table(table, output_data_path)
if __name__ == '__main__':
create_component_from_func(
convert_csv_to_apache_parquet,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['pyarrow==0.17.1'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/from_CSV/component.yaml",
},
)
| 604 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/from_CSV/component.yaml | name: Convert csv to apache parquet
description: |-
Converts CSV table to Apache Parquet.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: CSV}
outputs:
- {name: output_data, type: ApacheParquet}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/from_CSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'pyarrow==0.17.1' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install
--quiet --no-warn-script-location 'pyarrow==0.17.1' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_csv_to_apache_parquet(
data_path,
output_data_path,
):
'''Converts CSV table to Apache Parquet.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import csv, parquet
table = csv.read_csv(data_path)
parquet.write_table(table, output_data_path)
import argparse
_parser = argparse.ArgumentParser(prog='Convert csv to apache parquet', description='Converts CSV table to Apache Parquet.\n\n [Apache Parquet](https://parquet.apache.org/)\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-data", dest="output_data_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = convert_csv_to_apache_parquet(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --data
- {inputPath: data}
- --output-data
- {outputPath: output_data}
| 605 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/_samples/sample_pipeline.py | import kfp
from kfp import components
component_store = components.ComponentStore(url_search_prefixes=['https://raw.githubusercontent.com/kubeflow/pipelines/af3eaf64e87313795cad1add9bfd9fa1e86af6de/components/'])
chicago_taxi_dataset_op = component_store.load_component(name='datasets/Chicago_Taxi_Trips')
convert_csv_to_apache_parquet_op = component_store.load_component(name='_converters/ApacheParquet/from_CSV')
convert_tsv_to_apache_parquet_op = component_store.load_component(name='_converters/ApacheParquet/from_TSV')
convert_apache_parquet_to_csv_op = component_store.load_component(name='_converters/ApacheParquet/to_CSV')
convert_apache_parquet_to_tsv_op = component_store.load_component(name='_converters/ApacheParquet/to_TSV')
convert_apache_parquet_to_apache_arrow_feather_op = component_store.load_component(name='_converters/ApacheParquet/to_ApacheArrowFeather')
convert_apache_arrow_feather_to_apache_parquet_op = component_store.load_component(name='_converters/ApacheParquet/from_ApacheArrowFeather')
def parquet_pipeline():
csv = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).output
tsv = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
format='tsv',
).output
csv_parquet = convert_csv_to_apache_parquet_op(csv).output
csv_parquet_csv = convert_apache_parquet_to_csv_op(csv_parquet).output
csv_parquet_feather = convert_apache_parquet_to_apache_arrow_feather_op(csv_parquet).output
csv_parquet_feather_parquet = convert_apache_arrow_feather_to_apache_parquet_op(csv_parquet_feather).output
tsv_parquet = convert_tsv_to_apache_parquet_op(tsv).output
tsv_parquet_tsv = convert_apache_parquet_to_tsv_op(tsv_parquet).output
tsv_parquet_feather = convert_apache_parquet_to_apache_arrow_feather_op(tsv_parquet).output
tsv_parquet_feather_parquet = convert_apache_arrow_feather_to_apache_parquet_op(tsv_parquet_feather).output
if __name__ == '__main__':
kfp_endpoint = None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(parquet_pipeline, arguments={})
| 606 |
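As an alternative to submitting the run directly with kfp.Client, the same pipeline function can be compiled into a package for later upload. A minimal sketch, assuming the KFP v1 SDK used throughout these samples and that parquet_pipeline from the sample above is in scope; the output file name is illustrative.

```python
# Compile the sample pipeline above into a package instead of submitting it immediately
# (assumes the KFP v1 SDK; parquet_pipeline is the function defined in the sample).
import kfp

kfp.compiler.Compiler().compile(
    pipeline_func=parquet_pipeline,
    package_path='parquet_pipeline.yaml',  # illustrative output path
)
```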
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/from_ApacheArrowFeather/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_apache_arrow_feather_to_apache_parquet(
data_path: InputPath('ApacheArrowFeather'),
output_data_path: OutputPath('ApacheParquet'),
):
'''Converts Apache Arrow Feather to Apache Parquet.
[Apache Arrow Feather](https://arrow.apache.org/docs/python/feather.html)
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import feather, parquet
table = feather.read_table(data_path)
parquet.write_table(table, output_data_path)
if __name__ == '__main__':
create_component_from_func(
convert_apache_arrow_feather_to_apache_parquet,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['pyarrow==0.17.1'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/from_ApacheArrowFeather/component.yaml",
},
)
| 607 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/from_ApacheArrowFeather/component.yaml | name: Convert apache arrow feather to apache parquet
description: |-
Converts Apache Arrow Feather to Apache Parquet.
[Apache Arrow Feather](https://arrow.apache.org/docs/python/feather.html)
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: ApacheArrowFeather}
outputs:
- {name: output_data, type: ApacheParquet}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/from_ApacheArrowFeather/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'pyarrow==0.17.1' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install
--quiet --no-warn-script-location 'pyarrow==0.17.1' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_apache_arrow_feather_to_apache_parquet(
data_path,
output_data_path,
):
'''Converts Apache Arrow Feather to Apache Parquet.
[Apache Arrow Feather](https://arrow.apache.org/docs/python/feather.html)
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import feather, parquet
table = feather.read_table(data_path)
parquet.write_table(table, output_data_path)
import argparse
_parser = argparse.ArgumentParser(prog='Convert apache arrow feather to apache parquet', description='Converts Apache Arrow Feather to Apache Parquet.\n\n [Apache Arrow Feather](https://arrow.apache.org/docs/python/feather.html)\n [Apache Parquet](https://parquet.apache.org/)\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-data", dest="output_data_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = convert_apache_arrow_feather_to_apache_parquet(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --data
- {inputPath: data}
- --output-data
- {outputPath: output_data}
| 608 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/to_ApacheArrowFeather/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_apache_parquet_to_apache_arrow_feather(
data_path: InputPath('ApacheParquet'),
output_data_path: OutputPath('ApacheArrowFeather'),
):
'''Converts Apache Parquet to Apache Arrow Feather.
[Apache Arrow Feather](https://arrow.apache.org/docs/python/feather.html)
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import feather, parquet
data_frame = parquet.read_pandas(data_path).to_pandas()
feather.write_feather(data_frame, output_data_path)
if __name__ == '__main__':
convert_apache_parquet_to_apache_arrow_feather_op = create_component_from_func(
convert_apache_parquet_to_apache_arrow_feather,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['pyarrow==0.17.1', 'pandas==1.0.3'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/to_ApacheArrowFeather/component.yaml",
},
)
| 609 |
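The two Feather converters above are mirror images of each other, so a local round trip is an easy consistency check. A minimal sketch, assuming pyarrow and pandas are installed; the table contents are illustrative.

```python
# Local Parquet <-> Feather round trip using the same pyarrow calls as the two components above
# (assumes pyarrow and pandas are installed).
import os
import tempfile
import pyarrow
from pyarrow import feather, parquet

with tempfile.TemporaryDirectory() as tmp:
    parquet_path = os.path.join(tmp, 'data.parquet')
    feather_path = os.path.join(tmp, 'data.feather')

    parquet.write_table(pyarrow.table({'a': [1, 2], 'b': [3.0, 4.0]}), parquet_path)

    # Parquet -> Feather, as in convert_apache_parquet_to_apache_arrow_feather.
    feather.write_feather(parquet.read_pandas(parquet_path).to_pandas(), feather_path)

    # Feather -> Parquet, as in convert_apache_arrow_feather_to_apache_parquet.
    parquet.write_table(feather.read_table(feather_path), parquet_path)

    print(parquet.read_table(parquet_path).to_pydict())  # {'a': [1, 2], 'b': [3.0, 4.0]}
```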
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/to_ApacheArrowFeather/component.yaml | name: Convert apache parquet to apache arrow feather
description: |-
Converts Apache Parquet to Apache Arrow Feather.
[Apache Arrow Feather](https://arrow.apache.org/docs/python/feather.html)
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: ApacheParquet}
outputs:
- {name: output_data, type: ApacheArrowFeather}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/to_ApacheArrowFeather/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'pyarrow==0.17.1' 'pandas==1.0.3' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3
-m pip install --quiet --no-warn-script-location 'pyarrow==0.17.1' 'pandas==1.0.3'
--user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_apache_parquet_to_apache_arrow_feather(
data_path,
output_data_path,
):
'''Converts Apache Parquet to Apache Arrow Feather.
[Apache Arrow Feather](https://arrow.apache.org/docs/python/feather.html)
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import feather, parquet
data_frame = parquet.read_pandas(data_path).to_pandas()
feather.write_feather(data_frame, output_data_path)
import argparse
_parser = argparse.ArgumentParser(prog='Convert apache parquet to apache arrow feather', description='Converts Apache Parquet to Apache Arrow Feather.\n\n [Apache Arrow Feather](https://arrow.apache.org/docs/python/feather.html)\n [Apache Parquet](https://parquet.apache.org/)\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-data", dest="output_data_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = convert_apache_parquet_to_apache_arrow_feather(**_parsed_args)
_output_serializers = [
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --data
- {inputPath: data}
- --output-data
- {outputPath: output_data}
| 610 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowJSGraphModel | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowJSGraphModel/from_TensorflowSavedModel/component.yaml | name: Convert Tensorflow SavedModel to Tensorflow JS GraphModel
inputs:
- {name: Model, type: TensorflowSavedModel}
outputs:
- {name: Model, type: TensorflowJSGraphModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/TensorflowJSGraphModel/from_TensorflowSavedModel/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -exc
- |
# Manually installing prerequisites so that tensorflowjs does not re-install tensorflow-cpu on top of tensorflow. See https://github.com/tensorflow/tfjs/issues/3953
python3 -m pip install --quiet 'h5py>=2.8.0' 'numpy>=1.16.4,<1.19.0' 'six>=1.12.0' 'tensorflow-hub==0.7.0' 'PyInquirer==1.0.3'
python3 -m pip install --quiet tensorflowjs==2.4.0 --no-dependencies
"$0" "$*"
- tensorflowjs_converter
- --input_format=tf_saved_model
- --output_format=tfjs_graph_model
- inputPath: Model
- outputPath: Model
| 611 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowJSGraphModel | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowJSGraphModel/from_KerasModelHdf5/component.yaml | name: Convert Keras HDF5 model to Tensorflow JS GraphModel
inputs:
- {name: Model, type: KerasModelHdf5}
outputs:
- {name: Model, type: TensorflowJSGraphModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/TensorflowJSGraphModel/from_KerasModelHdf5/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -exc
- |
# Manually installing prerequisites so that tensorflowjs does not re-install tensorflow-cpu on top of tensorflow. See https://github.com/tensorflow/tfjs/issues/3953
python3 -m pip install --quiet 'h5py>=2.8.0' 'numpy>=1.16.4,<1.19.0' 'six>=1.12.0' 'tensorflow-hub==0.7.0' 'PyInquirer==1.0.3'
python3 -m pip install --quiet tensorflowjs==2.4.0 --no-dependencies
"$0" "$*"
- tensorflowjs_converter
- --input_format=keras
- --output_format=tfjs_graph_model
- inputPath: Model
- outputPath: Model
| 612 |
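Unlike the Python-defined components, these two converters exist only as YAML specifications, so in a pipeline they are typically loaded by URL. A sketch of that, assuming the KFP v1 SDK; the URLs are the canonical_location annotations from the specs above, and the wiring shown in the comment is hypothetical.

```python
# Load the two TFJS converter components above by their canonical_location URLs (KFP v1 SDK assumed).
from kfp import components

convert_saved_model_to_tfjs_op = components.load_component_from_url(
    'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/TensorflowJSGraphModel/from_TensorflowSavedModel/component.yaml'
)
convert_keras_hdf5_to_tfjs_op = components.load_component_from_url(
    'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/TensorflowJSGraphModel/from_KerasModelHdf5/component.yaml'
)

# Inside a pipeline function, either op is wired to an upstream task that produces the matching
# model artifact, e.g. (hypothetical upstream "train_task"):
#   tfjs_model = convert_keras_hdf5_to_tfjs_op(model=train_task.outputs['model']).outputs['model']
```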
0 | kubeflow_public_repos/pipelines/components/contrib/keras/Train_classifier | kubeflow_public_repos/pipelines/components/contrib/keras/Train_classifier/from_CSV/component.py | from typing import NamedTuple
from kfp.components import create_component_from_func, InputPath, OutputPath
def keras_train_classifier_from_csv(
training_features_path: InputPath('CSV'),
training_labels_path: InputPath('CSV'),
network_json_path: InputPath('KerasModelJson'),
model_path: OutputPath('KerasModelHdf5'),
loss_name: str = 'categorical_crossentropy',
num_classes: int = None,
optimizer: str = 'rmsprop',
optimizer_config: dict = None,
learning_rate: float = 0.01,
num_epochs: int = 100,
batch_size: int = 32,
metrics: list = ['accuracy'],
random_seed: int = 0,
) -> NamedTuple('Outputs', [
('final_loss', float),
('final_metrics', dict),
('metrics_history', dict),
]):
'''Trains classifier model using Keras.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
import keras
import numpy
import pandas
import tensorflow
tensorflow.random.set_seed(random_seed)
numpy.random.seed(random_seed)
training_features_df = pandas.read_csv(training_features_path)
training_labels_df = pandas.read_csv(training_labels_path)
x_train = training_features_df.to_numpy()
y_train_labels = training_labels_df.to_numpy()
print('Training features shape:', x_train.shape)
    print('Number of training samples:', x_train.shape[0])
# Convert class vectors to binary class matrices.
y_train_one_hot = keras.utils.to_categorical(y_train_labels, num_classes)
model_json_str = Path(network_json_path).read_text()
model = keras.models.model_from_json(model_json_str)
model.add(keras.layers.Activation('softmax'))
# Initializing the optimizer
optimizer_config = optimizer_config or {}
optimizer_config['learning_rate'] = learning_rate
optimizer = keras.optimizers.deserialize({
'class_name': optimizer,
'config': optimizer_config,
})
model.compile(
loss=loss_name,
optimizer=optimizer,
metrics=metrics,
)
history = model.fit(
x_train,
y_train_one_hot,
batch_size=batch_size,
epochs=num_epochs,
shuffle=True
)
model.save(model_path)
metrics_history = {name: [float(value) for value in values] for name, values in history.history.items()}
final_metrics = {name: values[-1] for name, values in metrics_history.items()}
final_loss = final_metrics['loss']
return (final_loss, final_metrics, metrics_history)
if __name__ == '__main__':
keras_train_classifier_from_csv_op = create_component_from_func(
keras_train_classifier_from_csv,
base_image='tensorflow/tensorflow:2.2.0',
packages_to_install=['keras==2.3.1', 'pandas==1.0.5'],
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/keras/Train_classifier/from_CSV/component.yaml",
},
)
| 613 |
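The network_json input above is a Keras architecture serialized with to_json(); the component appends a softmax activation and compiles the model itself, so one reasonable choice is to leave the final Dense layer un-activated. A minimal sketch of producing that JSON locally, assuming keras==2.3.1 as pinned by the component; the layer sizes are illustrative.

```python
# Build the KerasModelJson artifact expected by keras_train_classifier_from_csv
# (assumes keras==2.3.1 as pinned above; sizes are illustrative).
import keras

number_of_features = 9  # must match the number of columns in the training features CSV
number_of_classes = 2   # also passed to the component via its num_classes input

network = keras.Sequential(layers=[
    keras.layers.Dense(10, activation=keras.activations.relu, input_shape=(number_of_features,)),
    keras.layers.Dense(number_of_classes),  # the component adds the softmax activation on top
])

network_json = network.to_json()  # pass this string as the component's network_json input
```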
0 | kubeflow_public_repos/pipelines/components/contrib/keras/Train_classifier | kubeflow_public_repos/pipelines/components/contrib/keras/Train_classifier/from_CSV/component.yaml | name: Keras train classifier from csv
description: |-
Trains classifier model using Keras.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: training_features, type: CSV}
- {name: training_labels, type: CSV}
- {name: network_json, type: KerasModelJson}
- {name: loss_name, type: String, default: categorical_crossentropy, optional: true}
- {name: num_classes, type: Integer, optional: true}
- {name: optimizer, type: String, default: rmsprop, optional: true}
- {name: optimizer_config, type: JsonObject, optional: true}
- {name: learning_rate, type: Float, default: '0.01', optional: true}
- {name: num_epochs, type: Integer, default: '100', optional: true}
- {name: batch_size, type: Integer, default: '32', optional: true}
- {name: metrics, type: JsonArray, default: '["accuracy"]', optional: true}
- {name: random_seed, type: Integer, default: '0', optional: true}
outputs:
- {name: model, type: KerasModelHdf5}
- {name: final_loss, type: Float}
- {name: final_metrics, type: JsonObject}
- {name: metrics_history, type: JsonObject}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/keras/Train_classifier/from_CSV/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.2.0
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'keras==2.3.1' 'pandas==1.0.5' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m
pip install --quiet --no-warn-script-location 'keras==2.3.1' 'pandas==1.0.5'
--user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def keras_train_classifier_from_csv(
training_features_path,
training_labels_path,
network_json_path,
model_path,
loss_name = 'categorical_crossentropy',
num_classes = None,
optimizer = 'rmsprop',
optimizer_config = None,
learning_rate = 0.01,
num_epochs = 100,
batch_size = 32,
metrics = ['accuracy'],
random_seed = 0,
):
'''Trains classifier model using Keras.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
import keras
import numpy
import pandas
import tensorflow
tensorflow.random.set_seed(random_seed)
numpy.random.seed(random_seed)
training_features_df = pandas.read_csv(training_features_path)
training_labels_df = pandas.read_csv(training_labels_path)
x_train = training_features_df.to_numpy()
y_train_labels = training_labels_df.to_numpy()
print('Training features shape:', x_train.shape)
          print('Number of training samples:', x_train.shape[0])
# Convert class vectors to binary class matrices.
y_train_one_hot = keras.utils.to_categorical(y_train_labels, num_classes)
model_json_str = Path(network_json_path).read_text()
model = keras.models.model_from_json(model_json_str)
model.add(keras.layers.Activation('softmax'))
# Initializing the optimizer
optimizer_config = optimizer_config or {}
optimizer_config['learning_rate'] = learning_rate
optimizer = keras.optimizers.deserialize({
'class_name': optimizer,
'config': optimizer_config,
})
model.compile(
loss=loss_name,
optimizer=optimizer,
metrics=metrics,
)
history = model.fit(
x_train,
y_train_one_hot,
batch_size=batch_size,
epochs=num_epochs,
shuffle=True
)
model.save(model_path)
metrics_history = {name: [float(value) for value in values] for name, values in history.history.items()}
final_metrics = {name: values[-1] for name, values in metrics_history.items()}
final_loss = final_metrics['loss']
return (final_loss, final_metrics, metrics_history)
import json
def _serialize_float(float_value: float) -> str:
if isinstance(float_value, str):
return float_value
if not isinstance(float_value, (float, int)):
raise TypeError('Value "{}" has type "{}" instead of float.'.format(str(float_value), str(type(float_value))))
return str(float_value)
def _serialize_json(obj) -> str:
if isinstance(obj, str):
return obj
import json
def default_serializer(obj):
if hasattr(obj, 'to_struct'):
return obj.to_struct()
else:
raise TypeError("Object of type '%s' is not JSON serializable and does not have .to_struct() method." % obj.__class__.__name__)
return json.dumps(obj, default=default_serializer, sort_keys=True)
import argparse
_parser = argparse.ArgumentParser(prog='Keras train classifier from csv', description='Trains classifier model using Keras.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--training-features", dest="training_features_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--training-labels", dest="training_labels_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--network-json", dest="network_json_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--loss-name", dest="loss_name", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--num-classes", dest="num_classes", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--optimizer", dest="optimizer", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--optimizer-config", dest="optimizer_config", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--learning-rate", dest="learning_rate", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--num-epochs", dest="num_epochs", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--batch-size", dest="batch_size", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--metrics", dest="metrics", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--random-seed", dest="random_seed", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=3)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = keras_train_classifier_from_csv(**_parsed_args)
_output_serializers = [
_serialize_float,
_serialize_json,
_serialize_json,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --training-features
- {inputPath: training_features}
- --training-labels
- {inputPath: training_labels}
- --network-json
- {inputPath: network_json}
- if:
cond: {isPresent: loss_name}
then:
- --loss-name
- {inputValue: loss_name}
- if:
cond: {isPresent: num_classes}
then:
- --num-classes
- {inputValue: num_classes}
- if:
cond: {isPresent: optimizer}
then:
- --optimizer
- {inputValue: optimizer}
- if:
cond: {isPresent: optimizer_config}
then:
- --optimizer-config
- {inputValue: optimizer_config}
- if:
cond: {isPresent: learning_rate}
then:
- --learning-rate
- {inputValue: learning_rate}
- if:
cond: {isPresent: num_epochs}
then:
- --num-epochs
- {inputValue: num_epochs}
- if:
cond: {isPresent: batch_size}
then:
- --batch-size
- {inputValue: batch_size}
- if:
cond: {isPresent: metrics}
then:
- --metrics
- {inputValue: metrics}
- if:
cond: {isPresent: random_seed}
then:
- --random-seed
- {inputValue: random_seed}
- --model
- {outputPath: model}
- '----output-paths'
- {outputPath: final_loss}
- {outputPath: final_metrics}
- {outputPath: metrics_history}
| 614 |
0 | kubeflow_public_repos/pipelines/components/contrib/keras/Train_classifier | kubeflow_public_repos/pipelines/components/contrib/keras/Train_classifier/_samples/sample_pipeline.py | import keras
import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
keras_train_classifier_from_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f6aabf7f10b1f545f1fd5079aa8071845224f8e7/components/keras/Train_classifier/from_CSV/component.yaml')
keras_convert_hdf5_model_to_tf_saved_model_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/51e49282d9511e4b72736c12dc66e37486849c6e/components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml')
number_of_classes = 2
# Creating the network
dense_network_with_sigmoid = keras.Sequential(layers=[
keras.layers.Dense(10, activation=keras.activations.sigmoid),
keras.layers.Dense(number_of_classes, activation=keras.activations.sigmoid),
])
def keras_classifier_pipeline():
training_data_in_csv = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=1000,
).output
training_data_for_classification_in_csv = pandas_transform_csv_op(
table=training_data_in_csv,
transform_code='''df.insert(0, "was_tipped", df["tips"] > 0); del df["tips"]; df = df.fillna(0)''',
).output
features_in_csv = pandas_transform_csv_op(
table=training_data_for_classification_in_csv,
transform_code='''df = df.drop(columns=["was_tipped"])''',
).output
labels_in_csv = pandas_transform_csv_op(
table=training_data_for_classification_in_csv,
transform_code='''df = df["was_tipped"] * 1''',
).output
keras_model_in_hdf5 = keras_train_classifier_from_csv_op(
training_features=features_in_csv,
training_labels=labels_in_csv,
network_json=dense_network_with_sigmoid.to_json(),
learning_rate=0.1,
num_epochs=100,
).outputs['model']
keras_model_in_tf_format = keras_convert_hdf5_model_to_tf_saved_model_op(
model=keras_model_in_hdf5,
).output
if __name__ == '__main__':
kfp_endpoint = None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(keras_classifier_pipeline, arguments={})
| 615 |
0 | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_regression_metrics | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_regression_metrics/from_CSV/component.py | from typing import NamedTuple
from kfp.components import InputPath, OutputPath, create_component_from_func
def calculate_regression_metrics_from_csv(
true_values_path: InputPath(),
predicted_values_path: InputPath(),
) -> NamedTuple('Outputs', [
('number_of_items', int),
('max_absolute_error', float),
('mean_absolute_error', float),
('mean_squared_error', float),
('root_mean_squared_error', float),
('metrics', dict),
]):
'''Calculates regression metrics.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import math
import numpy
true_values = numpy.loadtxt(true_values_path, dtype=numpy.float64)
predicted_values = numpy.loadtxt(predicted_values_path, dtype=numpy.float64)
    if len(predicted_values.shape) != 1:
        raise NotImplementedError('Only single prediction values are supported.')
    if len(true_values.shape) != 1:
        raise NotImplementedError('Only single true values are supported.')
if predicted_values.shape != true_values.shape:
raise ValueError('Input shapes are different: {} != {}'.format(predicted_values.shape, true_values.shape))
number_of_items = true_values.size
errors = (true_values - predicted_values)
abs_errors = numpy.abs(errors)
squared_errors = errors ** 2
max_absolute_error = numpy.max(abs_errors)
mean_absolute_error = numpy.average(abs_errors)
mean_squared_error = numpy.average(squared_errors)
root_mean_squared_error = math.sqrt(mean_squared_error)
metrics = dict(
number_of_items=number_of_items,
max_absolute_error=max_absolute_error,
mean_absolute_error=mean_absolute_error,
mean_squared_error=mean_squared_error,
root_mean_squared_error=root_mean_squared_error,
)
return (
number_of_items,
max_absolute_error,
mean_absolute_error,
mean_squared_error,
root_mean_squared_error,
metrics,
)
if __name__ == '__main__':
calculate_regression_metrics_from_csv_op = create_component_from_func(
calculate_regression_metrics_from_csv,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['numpy==1.19.0'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml",
},
)
| 616 |
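The metric definitions above are straightforward to verify by hand on a tiny example. A minimal local sketch, assuming only numpy; the values in the comments are worked out from the formulas in the component.

```python
# Hand-checkable example of the regression metrics computed by the component above (assumes numpy).
import math
import numpy

true_values = numpy.array([1.0, 2.0, 3.0])
predicted_values = numpy.array([1.0, 2.0, 4.0])

errors = true_values - predicted_values      # [ 0.,  0., -1.]
abs_errors = numpy.abs(errors)               # [ 0.,  0.,  1.]

print(numpy.max(abs_errors))                 # max_absolute_error      -> 1.0
print(numpy.average(abs_errors))             # mean_absolute_error     -> 0.333...
print(numpy.average(errors ** 2))            # mean_squared_error      -> 0.333...
print(math.sqrt(numpy.average(errors ** 2))) # root_mean_squared_error -> 0.577...
```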
0 | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_regression_metrics | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml | name: Calculate regression metrics from csv
description: |-
Calculates regression metrics.
Annotations:
author: Alexey Volkov <[email protected]>
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'
inputs:
- {name: true_values}
- {name: predicted_values}
outputs:
- {name: number_of_items, type: Integer}
- {name: max_absolute_error, type: Float}
- {name: mean_absolute_error, type: Float}
- {name: mean_squared_error, type: Float}
- {name: root_mean_squared_error, type: Float}
- {name: metrics, type: JsonObject}
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'numpy==1.19.0' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'numpy==1.19.0' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def calculate_regression_metrics_from_csv(
true_values_path,
predicted_values_path,
):
'''Calculates regression metrics.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import math
import numpy
true_values = numpy.loadtxt(true_values_path, dtype=numpy.float64)
predicted_values = numpy.loadtxt(predicted_values_path, dtype=numpy.float64)
          if len(predicted_values.shape) != 1:
              raise NotImplementedError('Only single prediction values are supported.')
          if len(true_values.shape) != 1:
              raise NotImplementedError('Only single true values are supported.')
if predicted_values.shape != true_values.shape:
raise ValueError('Input shapes are different: {} != {}'.format(predicted_values.shape, true_values.shape))
number_of_items = true_values.size
errors = (true_values - predicted_values)
abs_errors = numpy.abs(errors)
squared_errors = errors ** 2
max_absolute_error = numpy.max(abs_errors)
mean_absolute_error = numpy.average(abs_errors)
mean_squared_error = numpy.average(squared_errors)
root_mean_squared_error = math.sqrt(mean_squared_error)
metrics = dict(
number_of_items=number_of_items,
max_absolute_error=max_absolute_error,
mean_absolute_error=mean_absolute_error,
mean_squared_error=mean_squared_error,
root_mean_squared_error=root_mean_squared_error,
)
return (
number_of_items,
max_absolute_error,
mean_absolute_error,
mean_squared_error,
root_mean_squared_error,
metrics,
)
def _serialize_json(obj) -> str:
if isinstance(obj, str):
return obj
import json
def default_serializer(obj):
if hasattr(obj, 'to_struct'):
return obj.to_struct()
else:
raise TypeError("Object of type '%s' is not JSON serializable and does not have .to_struct() method." % obj.__class__.__name__)
return json.dumps(obj, default=default_serializer, sort_keys=True)
def _serialize_float(float_value: float) -> str:
if isinstance(float_value, str):
return float_value
if not isinstance(float_value, (float, int)):
raise TypeError('Value "{}" has type "{}" instead of float.'.format(str(float_value), str(type(float_value))))
return str(float_value)
def _serialize_int(int_value: int) -> str:
if isinstance(int_value, str):
return int_value
if not isinstance(int_value, int):
raise TypeError('Value "{}" has type "{}" instead of int.'.format(str(int_value), str(type(int_value))))
return str(int_value)
import argparse
_parser = argparse.ArgumentParser(prog='Calculate regression metrics from csv', description='Calculates regression metrics.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--true-values", dest="true_values_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--predicted-values", dest="predicted_values_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=6)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = calculate_regression_metrics_from_csv(**_parsed_args)
_output_serializers = [
_serialize_int,
_serialize_float,
_serialize_float,
_serialize_float,
_serialize_float,
_serialize_json,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --true-values
- {inputPath: true_values}
- --predicted-values
- {inputPath: predicted_values}
- '----output-paths'
- {outputPath: number_of_items}
- {outputPath: max_absolute_error}
- {outputPath: mean_absolute_error}
- {outputPath: mean_squared_error}
- {outputPath: root_mean_squared_error}
- {outputPath: metrics}
| 617 |
0 | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_regression_metrics | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_regression_metrics/_samples/sample_pipleine.py | import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
xgboost_train_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
xgboost_predict_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
drop_header_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml')
calculate_regression_metrics_from_csv_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml')
def regression_metrics_pipeline():
training_data_csv = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).output
# Training
model_trained_on_csv = xgboost_train_on_csv_op(
training_data=training_data_csv,
label_column=0,
objective='reg:squarederror',
num_iterations=200,
).outputs['model']
# Predicting
predictions = xgboost_predict_on_csv_op(
data=training_data_csv,
model=model_trained_on_csv,
label_column=0,
).output
# Preparing the true values
true_values_table = pandas_transform_csv_op(
table=training_data_csv,
transform_code='''df = df[["tips"]]''',
).output
true_values = drop_header_op(true_values_table).output
# Calculating the regression metrics
calculate_regression_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions,
)
if __name__ == '__main__':
kfp_endpoint=None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(regression_metrics_pipeline, arguments={})
| 618 |
0 | kubeflow_public_repos/pipelines/components/contrib/ml_metrics | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Aggregate_regression_metrics/component.py | from typing import NamedTuple
from kfp.components import create_component_from_func
def aggregate_regression_metrics(
metrics_1: dict,
metrics_2: dict = None,
metrics_3: dict = None,
metrics_4: dict = None,
metrics_5: dict = None,
) -> NamedTuple('Outputs', [
('number_of_items', int),
('max_absolute_error', float),
('mean_absolute_error', float),
('mean_squared_error', float),
('root_mean_squared_error', float),
('metrics', dict),
]):
'''Calculates regression metrics.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import math
metrics_dicts = [d for d in [metrics_1, metrics_2, metrics_3, metrics_4, metrics_5] if d is not None]
number_of_items = sum(metrics['number_of_items'] for metrics in metrics_dicts)
max_absolute_error = max(metrics['max_absolute_error'] for metrics in metrics_dicts)
mean_absolute_error = sum(metrics['mean_absolute_error'] * metrics['number_of_items'] for metrics in metrics_dicts) / number_of_items
mean_squared_error = sum(metrics['mean_squared_error'] * metrics['number_of_items'] for metrics in metrics_dicts) / number_of_items
root_mean_squared_error = math.sqrt(mean_squared_error)
metrics = dict(
number_of_items=number_of_items,
max_absolute_error=max_absolute_error,
mean_absolute_error=mean_absolute_error,
mean_squared_error=mean_squared_error,
root_mean_squared_error=root_mean_squared_error,
)
return (
number_of_items,
max_absolute_error,
mean_absolute_error,
mean_squared_error,
root_mean_squared_error,
metrics,
)
if __name__ == '__main__':
aggregate_regression_metrics_op = create_component_from_func(
aggregate_regression_metrics,
output_component_file='component.yaml',
base_image='python:3.7',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/ml_metrics/Aggregate_regression_metrics/component.yaml",
},
)
| 619 |
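The aggregation above sums the item counts, takes the maximum of the per-chunk max errors, weights the mean errors by each chunk's number_of_items, and recomputes the RMSE from the aggregated MSE. A small worked example, assuming the function above is importable in the same module; the input dictionaries are made up for illustration.

```python
# Illustrative inputs for aggregate_regression_metrics (hand-picked values, not real pipeline output).
chunk_1 = dict(number_of_items=2, max_absolute_error=1.0, mean_absolute_error=0.5, mean_squared_error=0.5)
chunk_2 = dict(number_of_items=3, max_absolute_error=2.0, mean_absolute_error=1.0, mean_squared_error=2.0)

# Expected aggregate values:
#   number_of_items         = 2 + 3               = 5
#   max_absolute_error      = max(1.0, 2.0)       = 2.0
#   mean_absolute_error     = (0.5*2 + 1.0*3) / 5 = 0.8
#   mean_squared_error      = (0.5*2 + 2.0*3) / 5 = 1.4
#   root_mean_squared_error = sqrt(1.4)           ~ 1.1832
print(aggregate_regression_metrics(chunk_1, chunk_2))
```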
0 | kubeflow_public_repos/pipelines/components/contrib/ml_metrics | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Aggregate_regression_metrics/component.yaml | name: Aggregate regression metrics
description: |-
Calculates regression metrics.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: metrics_1, type: JsonObject}
- {name: metrics_2, type: JsonObject, optional: true}
- {name: metrics_3, type: JsonObject, optional: true}
- {name: metrics_4, type: JsonObject, optional: true}
- {name: metrics_5, type: JsonObject, optional: true}
outputs:
- {name: number_of_items, type: Integer}
- {name: max_absolute_error, type: Float}
- {name: mean_absolute_error, type: Float}
- {name: mean_squared_error, type: Float}
- {name: root_mean_squared_error, type: Float}
- {name: metrics, type: JsonObject}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/ml_metrics/Aggregate_regression_metrics/component.yaml'
implementation:
container:
image: python:3.7
command:
- python3
- -u
- -c
- |
def aggregate_regression_metrics(
metrics_1,
metrics_2 = None,
metrics_3 = None,
metrics_4 = None,
metrics_5 = None,
):
'''Calculates regression metrics.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import math
metrics_dicts = [d for d in [metrics_1, metrics_2, metrics_3, metrics_4, metrics_5] if d is not None]
number_of_items = sum(metrics['number_of_items'] for metrics in metrics_dicts)
max_absolute_error = max(metrics['max_absolute_error'] for metrics in metrics_dicts)
mean_absolute_error = sum(metrics['mean_absolute_error'] * metrics['number_of_items'] for metrics in metrics_dicts) / number_of_items
mean_squared_error = sum(metrics['mean_squared_error'] * metrics['number_of_items'] for metrics in metrics_dicts) / number_of_items
root_mean_squared_error = math.sqrt(mean_squared_error)
metrics = dict(
number_of_items=number_of_items,
max_absolute_error=max_absolute_error,
mean_absolute_error=mean_absolute_error,
mean_squared_error=mean_squared_error,
root_mean_squared_error=root_mean_squared_error,
)
return (
number_of_items,
max_absolute_error,
mean_absolute_error,
mean_squared_error,
root_mean_squared_error,
metrics,
)
def _serialize_json(obj) -> str:
if isinstance(obj, str):
return obj
import json
def default_serializer(obj):
if hasattr(obj, 'to_struct'):
return obj.to_struct()
else:
raise TypeError("Object of type '%s' is not JSON serializable and does not have .to_struct() method." % obj.__class__.__name__)
return json.dumps(obj, default=default_serializer, sort_keys=True)
def _serialize_float(float_value: float) -> str:
if isinstance(float_value, str):
return float_value
if not isinstance(float_value, (float, int)):
raise TypeError('Value "{}" has type "{}" instead of float.'.format(str(float_value), str(type(float_value))))
return str(float_value)
def _serialize_int(int_value: int) -> str:
if isinstance(int_value, str):
return int_value
if not isinstance(int_value, int):
raise TypeError('Value "{}" has type "{}" instead of int.'.format(str(int_value), str(type(int_value))))
return str(int_value)
import json
import argparse
_parser = argparse.ArgumentParser(prog='Aggregate regression metrics', description='Calculates regression metrics.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--metrics-1", dest="metrics_1", type=json.loads, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--metrics-2", dest="metrics_2", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--metrics-3", dest="metrics_3", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--metrics-4", dest="metrics_4", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--metrics-5", dest="metrics_5", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=6)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = aggregate_regression_metrics(**_parsed_args)
_output_serializers = [
_serialize_int,
_serialize_float,
_serialize_float,
_serialize_float,
_serialize_float,
_serialize_json,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --metrics-1
- {inputValue: metrics_1}
- if:
cond: {isPresent: metrics_2}
then:
- --metrics-2
- {inputValue: metrics_2}
- if:
cond: {isPresent: metrics_3}
then:
- --metrics-3
- {inputValue: metrics_3}
- if:
cond: {isPresent: metrics_4}
then:
- --metrics-4
- {inputValue: metrics_4}
- if:
cond: {isPresent: metrics_5}
then:
- --metrics-5
- {inputValue: metrics_5}
- '----output-paths'
- {outputPath: number_of_items}
- {outputPath: max_absolute_error}
- {outputPath: mean_absolute_error}
- {outputPath: mean_squared_error}
- {outputPath: root_mean_squared_error}
- {outputPath: metrics}
| 620 |
0 | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_classification_metrics | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_classification_metrics/from_CSV/component.py | from typing import NamedTuple
from kfp.components import InputPath, create_component_from_func
def calculate_classification_metrics_from_csv(
true_values_path: InputPath(),
predicted_values_path: InputPath(),
sample_weights_path: InputPath() = None,
average: str = 'binary'
) -> NamedTuple('Outputs', [
('f1', float),
('precision', float),
('recall', float),
('accuracy', float),
]):
"""
Calculates classification metrics.
Annotations:
author: Anton Kiselev <[email protected]>
"""
import numpy
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
true_values = numpy.loadtxt(true_values_path, dtype=str)
predicted_values = numpy.loadtxt(predicted_values_path, dtype=str)
    if len(predicted_values.shape) != 1:
        raise NotImplementedError('Only single prediction values are supported.')
    if len(true_values.shape) != 1:
        raise NotImplementedError('Only single true values are supported.')
if predicted_values.shape != true_values.shape:
raise ValueError(f'Input shapes are different: {predicted_values.shape} != {true_values.shape}')
sample_weights = None
if sample_weights_path is not None:
sample_weights = numpy.loadtxt(sample_weights_path, dtype=float)
        if len(sample_weights.shape) != 1:
            raise NotImplementedError('Only single sample weights are supported.')
if sample_weights.shape != predicted_values.shape:
raise ValueError(f'Input shapes of sample weights and predictions are different: '
f'{sample_weights.shape} != {predicted_values.shape}')
f1 = f1_score(true_values, predicted_values, average=average, sample_weight=sample_weights)
precision = precision_score(true_values, predicted_values, average=average, sample_weight=sample_weights)
recall = recall_score(true_values, predicted_values, average=average, sample_weight=sample_weights)
    accuracy = accuracy_score(true_values, predicted_values, sample_weight=sample_weights)
metrics = dict(
f1=f1,
precision=precision,
recall=recall,
accuracy=accuracy
)
return (
f1,
precision,
recall,
accuracy,
metrics,
)
if __name__ == '__main__':
    calculate_classification_metrics_from_csv_op = create_component_from_func(
calculate_classification_metrics_from_csv,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['numpy==1.19.0', 'scikit-learn==0.23.2']
)
| 621 |
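The component above delegates to scikit-learn, and because it loads both value files as strings, binary averaging needs an explicit pos_label when the same metrics are checked locally. A minimal sketch of the underlying metric calls, assuming scikit-learn is installed; the label arrays are illustrative.

```python
# Local check of the scikit-learn metrics used by the component above (scikit-learn assumed installed).
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

true_values      = ['1', '0', '1', '1']
predicted_values = ['1', '0', '0', '1']

print(accuracy_score(true_values, predicted_values))                  # 0.75
print(precision_score(true_values, predicted_values, pos_label='1'))  # 1.0
print(recall_score(true_values, predicted_values, pos_label='1'))     # 0.666...
print(f1_score(true_values, predicted_values, pos_label='1'))         # 0.8
```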
0 | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_classification_metrics | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_classification_metrics/from_CSV/component.yaml | name: Calculate classification metrics from csv
description: |-
Calculates classification metrics.
Annotations:
author: Anton Kiselev <[email protected]>
inputs:
- {name: true_values}
- {name: predicted_values}
- {name: sample_weights, optional: true}
- {name: average, type: String, default: binary, optional: true}
outputs:
- {name: f1, type: Float}
- {name: precision, type: Float}
- {name: recall, type: Float}
- {name: accuracy, type: Float}
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'numpy==1.19.0' 'scikit-learn==0.23.2' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3
-m pip install --quiet --no-warn-script-location 'numpy==1.19.0' 'scikit-learn==0.23.2'
--user) && "$0" "$@"
- python3
- -u
- -c
- "def calculate_classification_metrics_from_csv(\n true_values_path ,\n\
\ predicted_values_path ,\n sample_weights_path = None,\n \
\ average = 'binary'\n): \n \n \n \n \n\n \"\"\"\n\
\ Calculates classification metrics.\n\n Annotations:\n author:\
\ Anton Kiselev <[email protected]>\n \"\"\"\n import numpy\n \
\ from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n\
\n true_values = numpy.loadtxt(true_values_path, dtype=str)\n predicted_values\
\ = numpy.loadtxt(predicted_values_path, dtype=str)\n\n if len(predicted_values.shape)\
\ != 1:\n raise NotImplemented('Only single prediction values are supported.')\n\
\ if len(true_values.shape) != 1:\n raise NotImplemented('Only single\
\ true values are supported.')\n\n if predicted_values.shape != true_values.shape:\n\
\ raise ValueError(f'Input shapes are different: {predicted_values.shape}\
\ != {true_values.shape}')\n\n sample_weights = None\n if sample_weights_path\
\ is not None:\n sample_weights = numpy.loadtxt(sample_weights_path,\
\ dtype=float)\n\n if len(sample_weights.shape) != 1:\n raise\
\ NotImplemented('Only single sample weights are supported.')\n\n if\
\ sample_weights.shape != predicted_values.shape:\n raise ValueError(f'Input\
\ shapes of sample weights and predictions are different: '\n \
\ f'{sample_weights.shape} != {predicted_values.shape}')\n\n \
\ f1 = f1_score(true_values, predicted_values, average=average, sample_weight=sample_weights)\n\
\ precision = precision_score(true_values, predicted_values, average=average,\
\ sample_weight=sample_weights)\n recall = recall_score(true_values, predicted_values,\
\ average=average, sample_weight=sample_weights)\n accuracy = accuracy_score(true_values,\
\ predicted_values, normalize=average, sample_weight=sample_weights)\n\n \
\ metrics = dict(\n f1=f1,\n precision=precision,\n recall=recall,\n\
\ accuracy=accuracy\n )\n\n return (\n f1,\n precision,\n\
\ recall,\n accuracy,\n metrics,\n )\n\ndef _serialize_float(float_value:\
\ float) -> str:\n if isinstance(float_value, str):\n return float_value\n\
\ if not isinstance(float_value, (float, int)):\n raise TypeError('Value\
\ \"{}\" has type \"{}\" instead of float.'.format(str(float_value), str(type(float_value))))\n\
\ return str(float_value)\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Calculate\
\ classification metrics from csv', description='Calculates classification metrics.\\\
n\\n Annotations:\\n author: Anton Kiselev <[email protected]>')\n\
_parser.add_argument(\"--true-values\", dest=\"true_values_path\", type=str,\
\ required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"--predicted-values\"\
, dest=\"predicted_values_path\", type=str, required=True, default=argparse.SUPPRESS)\n\
_parser.add_argument(\"--sample-weights\", dest=\"sample_weights_path\", type=str,\
\ required=False, default=argparse.SUPPRESS)\n_parser.add_argument(\"--average\"\
, dest=\"average\", type=str, required=False, default=argparse.SUPPRESS)\n_parser.add_argument(\"\
----output-paths\", dest=\"_output_paths\", type=str, nargs=4)\n_parsed_args\
\ = vars(_parser.parse_args())\n_output_files = _parsed_args.pop(\"_output_paths\"\
, [])\n\n_outputs = calculate_classification_metrics_from_csv(**_parsed_args)\n\
\n_output_serializers = [\n _serialize_float,\n _serialize_float,\n \
\ _serialize_float,\n _serialize_float,\n\n]\n\nimport os\nfor idx, output_file\
\ in enumerate(_output_files):\n try:\n os.makedirs(os.path.dirname(output_file))\n\
\ except OSError:\n pass\n with open(output_file, 'w') as f:\n\
\ f.write(_output_serializers[idx](_outputs[idx]))\n"
args:
- --true-values
- {inputPath: true_values}
- --predicted-values
- {inputPath: predicted_values}
- if:
cond: {isPresent: sample_weights}
then:
- --sample-weights
- {inputPath: sample_weights}
- if:
cond: {isPresent: average}
then:
- --average
- {inputValue: average}
- '----output-paths'
- {outputPath: f1}
- {outputPath: precision}
- {outputPath: recall}
- {outputPath: accuracy}
| 622 |
0 | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_classification_metrics | kubeflow_public_repos/pipelines/components/contrib/ml_metrics/Calculate_classification_metrics/_samples/sample_pipleine.py | from pathlib import Path
import kfp
from kfp.components import ComponentStore, create_component_from_func, InputPath, OutputPath, load_component_from_file
store = ComponentStore.default_store
chicago_taxi_dataset_op = store.load_component('datasets/Chicago_Taxi_Trips')
xgboost_train_on_csv_op = store.load_component('XGBoost/Train')
xgboost_predict_on_csv_op = store.load_component('XGBoost/Predict')
pandas_transform_csv_op = store.load_component('pandas/Transform_DataFrame/in_CSV_format')
drop_header_op = store.load_component('tables/Remove_header')
def convert_values_to_int(text_path: InputPath('Text'),
output_path: OutputPath('Text')):
"""Returns the number of values in a CSV column."""
import numpy as np
result = np.loadtxt(text_path)
np.savetxt(output_path, result, fmt='%d')
convert_values_to_int_op = create_component_from_func(
func=convert_values_to_int,
base_image='python:3.7',
packages_to_install=['pandas==1.1'],
)
calculate_classification_metrics_from_csv_op = load_component_from_file(
str(Path(__file__).parent.parent / 'from_CSV' / 'component.yaml')
)
def classification_metrics_pipeline():
features = ['trip_seconds', 'trip_miles', 'pickup_community_area', 'dropoff_community_area',
'fare', 'tolls', 'extras', 'trip_total']
target = 'company'
training_data_csv = chicago_taxi_dataset_op(
select=','.join([target] + features),
where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
limit=100,
).output
training_data_transformed_csv = pandas_transform_csv_op(
table=training_data_csv,
transform_code=f'''df["{target}"] = df["{target}"].astype('category').cat.codes''',
).output
# Training
model_trained_on_csv = xgboost_train_on_csv_op(
training_data=training_data_transformed_csv,
label_column=0,
booster_params={'num_class': 13},
objective='multi:softmax',
num_iterations=50,
).outputs['model']
# Predicting
predictions = xgboost_predict_on_csv_op(
data=training_data_csv,
model=model_trained_on_csv,
label_column=0,
).output
predictions_converted = convert_values_to_int_op(
text=predictions
).output
# Preparing the true values
true_values_table = pandas_transform_csv_op(
table=training_data_csv,
transform_code=f'df["{target}"] = df["{target}"].astype("category").cat.codes\n'
f'df = df[["{target}"]]'
).output
true_values = drop_header_op(true_values_table).output
    # Calculating the classification metrics
calculate_classification_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions_converted,
average='macro',
)
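# The block below submits the pipeline to a KFP endpoint. As a rough offline alternative
# (a sketch, assuming the KFP v1 SDK used throughout this file), the pipeline can instead
# be compiled to a workflow package:
#
#   from kfp import compiler
#   compiler.Compiler().compile(classification_metrics_pipeline, 'classification_metrics_pipeline.yaml')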
if __name__ == '__main__':
kfp_endpoint = None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(classification_metrics_pipeline, arguments={})
| 623 |
0 | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace/Load_dataset/component.py | from typing import NamedTuple
from kfp.components import create_component_from_func, OutputPath
def load_dataset_using_huggingface(
dataset_name: str,
dataset_dict_path: OutputPath('HuggingFaceDatasetDict'),
) -> NamedTuple('Outputs', [
('splits', list),
]):
from datasets import load_dataset
dataset_dict = load_dataset(dataset_name)
dataset_dict.save_to_disk(dataset_dict_path)
splits = list(dataset_dict.keys())
return (splits,)
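# Illustration (behaviour of the 'datasets' library, not verified here for every dataset):
# load_dataset('imdb') returns a DatasetDict keyed by split name, so the 'splits' output
# would be something like ["train", "test", "unsupervised"].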
if __name__ == '__main__':
load_dataset_op = create_component_from_func(
load_dataset_using_huggingface,
base_image='python:3.9',
packages_to_install=['datasets==1.6.2'],
annotations={
'author': 'Alexey Volkov <[email protected]>',
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/datasets/HuggingFace/Load_dataset/component.yaml",
},
output_component_file='component.yaml',
)
| 624 |
0 | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace/Load_dataset/component.yaml | name: Load dataset using huggingface
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/datasets/HuggingFace/Load_dataset/component.yaml'
inputs:
- {name: dataset_name, type: String}
outputs:
- {name: dataset_dict, type: HuggingFaceDatasetDict}
- {name: splits, type: JsonArray}
implementation:
container:
image: python:3.9
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'datasets==1.6.2' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install
--quiet --no-warn-script-location 'datasets==1.6.2' --user) && "$0" "$@"
- sh
- -ec
- |
program_path=$(mktemp)
printf "%s" "$0" > "$program_path"
python3 -u "$program_path" "$@"
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def load_dataset_using_huggingface(
dataset_name,
dataset_dict_path,
):
from datasets import load_dataset
dataset_dict = load_dataset(dataset_name)
dataset_dict.save_to_disk(dataset_dict_path)
splits = list(dataset_dict.keys())
return (splits,)
def _serialize_json(obj) -> str:
if isinstance(obj, str):
return obj
import json
def default_serializer(obj):
if hasattr(obj, 'to_struct'):
return obj.to_struct()
else:
raise TypeError("Object of type '%s' is not JSON serializable and does not have .to_struct() method." % obj.__class__.__name__)
return json.dumps(obj, default=default_serializer, sort_keys=True)
import argparse
_parser = argparse.ArgumentParser(prog='Load dataset using huggingface', description='')
_parser.add_argument("--dataset-name", dest="dataset_name", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--dataset-dict", dest="dataset_dict_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = load_dataset_using_huggingface(**_parsed_args)
_output_serializers = [
_serialize_json,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --dataset-name
- {inputValue: dataset_name}
- --dataset-dict
- {outputPath: dataset_dict}
- '----output-paths'
- {outputPath: splits}
| 625 |
0 | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace/_samples/sample.pipeline.py | from kfp import components
from kfp import dsl
load_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/d0e14a1dad4b851ad2a60a0c1a8201493f3d931c/components/datasets/HuggingFace/Load_dataset/component.yaml')
split_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/d0e14a1dad4b851ad2a60a0c1a8201493f3d931c/components/datasets/HuggingFace/Split_dataset/component.yaml')
def huggingface_pipeline():
dataset_dict_task = load_dataset_op(dataset_name='imdb')
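    # dsl.ParallelFor below fans out over the JSON list in the 'splits' output,
    # creating one split_dataset_op task per split name at runtime.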
with dsl.ParallelFor(dataset_dict_task.outputs['splits']) as split_name:
        dataset_task = split_dataset_op(
dataset_dict=dataset_dict_task.outputs['dataset_dict'],
split_name=split_name,
)
if __name__ == '__main__':
import kfp
kfp_endpoint = None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
huggingface_pipeline,
arguments={}
)
| 626 |
0 | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace/Split_dataset/component.py | from kfp.components import create_component_from_func, InputPath, OutputPath
def split_dataset_huggingface(
dataset_dict_path: InputPath('HuggingFaceDatasetDict'),
dataset_split_path: OutputPath('HuggingFaceDataset'),
dataset_path: OutputPath('HuggingFaceArrowDataset'),
# dataset_indices_path: OutputPath('HuggingFaceArrowDataset'),
dataset_info_path: OutputPath(dict),
dataset_state_path: OutputPath(dict),
split_name: str = None,
):
import os
import shutil
from datasets import config as datasets_config
print(f'DatasetDict contents: {os.listdir(dataset_dict_path)}')
shutil.copytree(os.path.join(dataset_dict_path, split_name), dataset_split_path)
print(f'Dataset contents: {os.listdir(os.path.join(dataset_dict_path, split_name))}')
shutil.copy(os.path.join(dataset_dict_path, split_name, datasets_config.DATASET_ARROW_FILENAME), dataset_path)
# shutil.copy(os.path.join(dataset_dict_path, split_name, datasets_config.DATASET_INDICES_FILENAME), dataset_indices_path)
shutil.copy(os.path.join(dataset_dict_path, split_name, datasets_config.DATASET_INFO_FILENAME), dataset_info_path)
shutil.copy(os.path.join(dataset_dict_path, split_name, datasets_config.DATASET_STATE_JSON_FILENAME), dataset_state_path)
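# Layout note (as produced by DatasetDict.save_to_disk in datasets==1.6.x): the input
# directory contains one sub-directory per split, each holding the Arrow data file,
# dataset_info.json and state.json, which is what the copies above pick apart.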
if __name__ == '__main__':
split_dataset_op = create_component_from_func(
split_dataset_huggingface,
base_image='python:3.9',
packages_to_install=['datasets==1.6.2'],
annotations={
'author': 'Alexey Volkov <[email protected]>',
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/datasets/HuggingFace/Split_dataset/component.yaml",
},
output_component_file='component.yaml',
)
| 627 |
0 | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace | kubeflow_public_repos/pipelines/components/contrib/datasets/HuggingFace/Split_dataset/component.yaml | name: Split dataset huggingface
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/datasets/HuggingFace/Split_dataset/component.yaml'
inputs:
- {name: dataset_dict, type: HuggingFaceDatasetDict}
- {name: split_name, type: String, optional: true}
outputs:
- {name: dataset_split, type: HuggingFaceDataset}
- {name: dataset, type: HuggingFaceArrowDataset}
- {name: dataset_info, type: JsonObject}
- {name: dataset_state, type: JsonObject}
implementation:
container:
image: python:3.9
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'datasets==1.6.2' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install
--quiet --no-warn-script-location 'datasets==1.6.2' --user) && "$0" "$@"
- sh
- -ec
- |
program_path=$(mktemp)
printf "%s" "$0" > "$program_path"
python3 -u "$program_path" "$@"
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def split_dataset_huggingface(
dataset_dict_path,
dataset_split_path,
dataset_path,
# dataset_indices_path: OutputPath('HuggingFaceArrowDataset'),
dataset_info_path,
dataset_state_path,
split_name = None,
):
import os
import shutil
from datasets import config as datasets_config
print(f'DatasetDict contents: {os.listdir(dataset_dict_path)}')
shutil.copytree(os.path.join(dataset_dict_path, split_name), dataset_split_path)
print(f'Dataset contents: {os.listdir(os.path.join(dataset_dict_path, split_name))}')
shutil.copy(os.path.join(dataset_dict_path, split_name, datasets_config.DATASET_ARROW_FILENAME), dataset_path)
# shutil.copy(os.path.join(dataset_dict_path, split_name, datasets_config.DATASET_INDICES_FILENAME), dataset_indices_path)
shutil.copy(os.path.join(dataset_dict_path, split_name, datasets_config.DATASET_INFO_FILENAME), dataset_info_path)
shutil.copy(os.path.join(dataset_dict_path, split_name, datasets_config.DATASET_STATE_JSON_FILENAME), dataset_state_path)
import argparse
_parser = argparse.ArgumentParser(prog='Split dataset huggingface', description='')
_parser.add_argument("--dataset-dict", dest="dataset_dict_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--split-name", dest="split_name", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--dataset-split", dest="dataset_split_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--dataset", dest="dataset_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--dataset-info", dest="dataset_info_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--dataset-state", dest="dataset_state_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = split_dataset_huggingface(**_parsed_args)
args:
- --dataset-dict
- {inputPath: dataset_dict}
- if:
cond: {isPresent: split_name}
then:
- --split-name
- {inputValue: split_name}
- --dataset-split
- {outputPath: dataset_split}
- --dataset
- {outputPath: dataset}
- --dataset-info
- {outputPath: dataset_info}
- --dataset-state
- {outputPath: dataset_state}
| 628 |
0 | kubeflow_public_repos/pipelines/components/contrib/datasets | kubeflow_public_repos/pipelines/components/contrib/datasets/Chicago_Taxi_Trips/component.yaml | name: Chicago Taxi Trips dataset
description: |
City of Chicago Taxi Trips dataset: https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew
The input parameters configure the SQL query to the database.
The dataset is pretty big, so limit the number of results using the `Limit` or `Where` parameters.
Read [Socrata dev](https://dev.socrata.com/docs/queries/) for the advanced query syntax
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/datasets/Chicago_Taxi_Trips/component.yaml'
inputs:
- {name: Where, type: String, default: 'trip_start_timestamp>="1900-01-01" AND trip_start_timestamp<"2100-01-01"'}
- {name: Limit, type: Integer, default: '1000', description: 'Number of rows to return. The rows are randomly sampled.'}
- {name: Select, type: String, default: 'trip_id,taxi_id,trip_start_timestamp,trip_end_timestamp,trip_seconds,trip_miles,pickup_census_tract,dropoff_census_tract,pickup_community_area,dropoff_community_area,fare,tips,tolls,extras,trip_total,payment_type,company,pickup_centroid_latitude,pickup_centroid_longitude,pickup_centroid_location,dropoff_centroid_latitude,dropoff_centroid_longitude,dropoff_centroid_location'}
- {name: Format, type: String, default: 'csv', description: 'Output data format. Supports csv,tsv,xml,rdf,json'}
outputs:
- {name: Table, description: 'Result type depends on format. CSV and TSV have header.'}
implementation:
container:
# image: curlimages/curl # Sets a non-root user which cannot write to mounted volumes. See https://github.com/curl/curl-docker/issues/22
image: byrnedo/alpine-curl@sha256:548379d0a4a0c08b9e55d9d87a592b7d35d9ab3037f4936f5ccd09d0b625a342
command:
- sh
- -c
- |
set -e -x -o pipefail
output_path="$0"
select="$1"
where="$2"
limit="$3"
format="$4"
mkdir -p "$(dirname "$output_path")"
curl --get 'https://data.cityofchicago.org/resource/wrvz-psew.'"${format}" \
--data-urlencode '$limit='"${limit}" \
--data-urlencode '$where='"${where}" \
--data-urlencode '$select='"${select}" \
| tr -d '"' > "$output_path" # Removing unneeded quotes around all numbers
- {outputPath: Table}
- {inputValue: Select}
- {inputValue: Where}
- {inputValue: Limit}
- {inputValue: Format}
| 629 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Train_regression | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Train_regression/from_CSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def catboost_train_regression(
training_data_path: InputPath('CSV'),
model_path: OutputPath('CatBoostModel'),
starting_model_path: InputPath('CatBoostModel') = None,
label_column: int = 0,
loss_function: str = 'RMSE',
num_iterations: int = 500,
learning_rate: float = None,
depth: int = 6,
random_seed: int = 0,
cat_features: list = None,
additional_training_options: dict = {},
):
    '''Train a CatBoost regression model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'RMSE'. Possible values:
'RMSE', 'MAE', 'Quantile:alpha=value', 'LogLinQuantile:alpha=value', 'Poisson', 'MAPE', 'Lq:q=value'
num_iterations: Number of trees to add to the ensemble.
        learning_rate: Step size shrinkage used in update to prevent overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostRegressor
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from pathlib import Path
from catboost import CatBoostRegressor, Pool
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
train_data = Pool(
training_data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostRegressor(
iterations=num_iterations,
depth=depth,
learning_rate=learning_rate,
loss_function=loss_function,
random_seed=random_seed,
verbose=True,
**additional_training_options,
)
model.fit(
train_data,
cat_features=cat_features,
init_model=starting_model_path,
#verbose=False,
#plot=True,
)
Path(model_path).parent.mkdir(parents=True, exist_ok=True)
model.save_model(model_path)
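# Rough local usage sketch (paths and data are hypothetical, not part of this component):
#
#   catboost_train_regression(
#       training_data_path='train.csv',  # CSV with a header row and the label in column 0
#       model_path='model.cbm',
#       num_iterations=100,
#   )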
if __name__ == '__main__':
catboost_train_regression_op = create_component_from_func(
catboost_train_regression,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.23'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Train_regression/from_CSV/component.yaml",
},
)
| 630 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Train_regression | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Train_regression/from_CSV/component.yaml | name: Catboost train regression
description: |-
  Train a CatBoost regression model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'RMSE'. Possible values:
'RMSE', 'MAE', 'Quantile:alpha=value', 'LogLinQuantile:alpha=value', 'Poisson', 'MAPE', 'Lq:q=value'
num_iterations: Number of trees to add to the ensemble.
learning_rate: Step size shrinkage used in update to prevents overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostRegressor
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: training_data, type: CSV}
- {name: starting_model, type: CatBoostModel, optional: true}
- {name: label_column, type: Integer, default: '0', optional: true}
- {name: loss_function, type: String, default: RMSE, optional: true}
- {name: num_iterations, type: Integer, default: '500', optional: true}
- {name: learning_rate, type: Float, optional: true}
- {name: depth, type: Integer, default: '6', optional: true}
- {name: random_seed, type: Integer, default: '0', optional: true}
- {name: cat_features, type: JsonArray, optional: true}
- {name: additional_training_options, type: JsonObject, default: '{}', optional: true}
outputs:
- {name: model, type: CatBoostModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Train_regression/from_CSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'catboost==0.23' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'catboost==0.23' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def catboost_train_regression(
training_data_path,
model_path,
starting_model_path = None,
label_column = 0,
loss_function = 'RMSE',
num_iterations = 500,
learning_rate = None,
depth = 6,
random_seed = 0,
cat_features = None,
additional_training_options = {},
):
'''Train a CatBoost classifier model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'RMSE'. Possible values:
'RMSE', 'MAE', 'Quantile:alpha=value', 'LogLinQuantile:alpha=value', 'Poisson', 'MAPE', 'Lq:q=value'
num_iterations: Number of trees to add to the ensemble.
learning_rate: Step size shrinkage used in update to prevents overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostRegressor
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from pathlib import Path
from catboost import CatBoostRegressor, Pool
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
train_data = Pool(
training_data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostRegressor(
iterations=num_iterations,
depth=depth,
learning_rate=learning_rate,
loss_function=loss_function,
random_seed=random_seed,
verbose=True,
**additional_training_options,
)
model.fit(
train_data,
cat_features=cat_features,
init_model=starting_model_path,
#verbose=False,
#plot=True,
)
Path(model_path).parent.mkdir(parents=True, exist_ok=True)
model.save_model(model_path)
import json
import argparse
_parser = argparse.ArgumentParser(prog='Catboost train regression', description="Train a CatBoost classifier model.\n\n Args:\n training_data_path: Path for the training data in CSV format.\n model_path: Output path for the trained model in binary CatBoostModel format.\n starting_model_path: Path for the existing trained model to start from.\n label_column: Column containing the label data.\n\n loss_function: The metric to use in training and also selector of the machine learning\n problem to solve. Default = 'RMSE'. Possible values:\n 'RMSE', 'MAE', 'Quantile:alpha=value', 'LogLinQuantile:alpha=value', 'Poisson', 'MAPE', 'Lq:q=value'\n num_iterations: Number of trees to add to the ensemble.\n learning_rate: Step size shrinkage used in update to prevents overfitting.\n Default value is selected automatically for binary classification with other parameters set to default.\n In all other cases default is 0.03.\n depth: Depth of a tree. All trees are the same depth. Default = 6\n random_seed: Random number seed. Default = 0\n\n cat_features: A list of Categorical features (indices or names).\n additional_training_options: A dictionary with additional options to pass to CatBoostRegressor\n\n Outputs:\n model: Trained model in binary CatBoostModel format.\n\n Annotations:\n author: Alexey Volkov <[email protected]>")
_parser.add_argument("--training-data", dest="training_data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--starting-model", dest="starting_model_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--label-column", dest="label_column", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--loss-function", dest="loss_function", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--num-iterations", dest="num_iterations", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--learning-rate", dest="learning_rate", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--depth", dest="depth", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--random-seed", dest="random_seed", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--cat-features", dest="cat_features", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--additional-training-options", dest="additional_training_options", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = catboost_train_regression(**_parsed_args)
args:
- --training-data
- {inputPath: training_data}
- if:
cond: {isPresent: starting_model}
then:
- --starting-model
- {inputPath: starting_model}
- if:
cond: {isPresent: label_column}
then:
- --label-column
- {inputValue: label_column}
- if:
cond: {isPresent: loss_function}
then:
- --loss-function
- {inputValue: loss_function}
- if:
cond: {isPresent: num_iterations}
then:
- --num-iterations
- {inputValue: num_iterations}
- if:
cond: {isPresent: learning_rate}
then:
- --learning-rate
- {inputValue: learning_rate}
- if:
cond: {isPresent: depth}
then:
- --depth
- {inputValue: depth}
- if:
cond: {isPresent: random_seed}
then:
- --random-seed
- {inputValue: random_seed}
- if:
cond: {isPresent: cat_features}
then:
- --cat-features
- {inputValue: cat_features}
- if:
cond: {isPresent: additional_training_options}
then:
- --additional-training-options
- {inputValue: additional_training_options}
- --model
- {outputPath: model}
| 631 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_values | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_values/from_CSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def catboost_predict_values(
data_path: InputPath('CSV'),
model_path: InputPath('CatBoostModel'),
predictions_path: OutputPath(),
label_column: int = None,
):
'''Predict values with a CatBoost model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from catboost import CatBoost, Pool
import numpy
    if label_column is not None:
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
else:
column_description_path = None
eval_data = Pool(
data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoost()
model.load_model(model_path)
predictions = model.predict(eval_data, prediction_type='RawFormulaVal')
numpy.savetxt(predictions_path, predictions)
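# Output format note: prediction_type='RawFormulaVal' yields the raw model output (for
# regression losses such as RMSE this is the predicted value itself), and numpy.savetxt
# writes it as one number per line.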
if __name__ == '__main__':
catboost_predict_values_op = create_component_from_func(
catboost_predict_values,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.23'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Predict_values/from_CSV/component.yaml",
},
)
| 632 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_values | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_values/from_CSV/component.yaml | name: Catboost predict values
description: |-
Predict values with a CatBoost model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: CSV}
- {name: model, type: CatBoostModel}
- {name: label_column, type: Integer, optional: true}
outputs:
- {name: predictions}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Predict_values/from_CSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'catboost==0.23' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'catboost==0.23' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def catboost_predict_values(
data_path,
model_path,
predictions_path,
label_column = None,
):
'''Predict values with a CatBoost model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from catboost import CatBoost, Pool
import numpy
if label_column:
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
else:
column_description_path = None
eval_data = Pool(
data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoost()
model.load_model(model_path)
predictions = model.predict(eval_data, prediction_type='RawFormulaVal')
numpy.savetxt(predictions_path, predictions)
import argparse
_parser = argparse.ArgumentParser(prog='Catboost predict values', description='Predict values with a CatBoost model.\n\n Args:\n data_path: Path for the data in CSV format.\n model_path: Path for the trained model in binary CatBoostModel format.\n label_column: Column containing the label data.\n predictions_path: Output path for the predictions.\n\n Outputs:\n predictions: Predictions in text format.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--label-column", dest="label_column", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--predictions", dest="predictions_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = catboost_predict_values(**_parsed_args)
args:
- --data
- {inputPath: data}
- --model
- {inputPath: model}
- if:
cond: {isPresent: label_column}
then:
- --label-column
- {inputValue: label_column}
- --predictions
- {outputPath: predictions}
| 633 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost | kubeflow_public_repos/pipelines/components/contrib/CatBoost/_samples/sample_pipeline.py | import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e69a6694/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
catboost_train_classifier_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Train_classifier/from_CSV/component.yaml')
catboost_train_regression_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Train_regression/from_CSV/component.yaml')
catboost_predict_classes_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Predict_classes/from_CSV/component.yaml')
catboost_predict_values_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Predict_values/from_CSV/component.yaml')
catboost_predict_class_probabilities_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/Predict_class_probabilities/from_CSV/component.yaml')
catboost_to_apple_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/convert_CatBoostModel_to_AppleCoreMLModel/component.yaml')
catboost_to_onnx_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f97ad2/components/CatBoost/convert_CatBoostModel_to_ONNX/component.yaml')
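# The component URLs above are pinned to specific commits for reproducibility. In the
# pipeline below, task.output refers to a component's single unnamed output, while
# task.outputs['<name>'] is used for named outputs such as the trained 'model'.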
def catboost_pipeline():
training_data_in_csv = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).output
training_data_for_classification_in_csv = pandas_transform_csv_op(
table=training_data_in_csv,
transform_code='''df.insert(0, "was_tipped", df["tips"] > 0); del df["tips"]''',
).output
catboost_train_regression_task = catboost_train_regression_op(
training_data=training_data_in_csv,
loss_function='RMSE',
label_column=0,
num_iterations=200,
)
regression_model = catboost_train_regression_task.outputs['model']
catboost_train_classifier_task = catboost_train_classifier_op(
training_data=training_data_for_classification_in_csv,
label_column=0,
num_iterations=200,
)
classification_model = catboost_train_classifier_task.outputs['model']
evaluation_data_for_regression_in_csv = training_data_in_csv
evaluation_data_for_classification_in_csv = training_data_for_classification_in_csv
catboost_predict_values_op(
data=evaluation_data_for_regression_in_csv,
model=regression_model,
label_column=0,
)
catboost_predict_classes_op(
data=evaluation_data_for_classification_in_csv,
model=classification_model,
label_column=0,
)
catboost_predict_class_probabilities_op(
data=evaluation_data_for_classification_in_csv,
model=classification_model,
label_column=0,
)
catboost_to_apple_op(regression_model)
catboost_to_apple_op(classification_model)
catboost_to_onnx_op(regression_model)
catboost_to_onnx_op(classification_model)
if __name__ == '__main__':
kfp_endpoint=None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(catboost_pipeline, arguments={})
| 634 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost | kubeflow_public_repos/pipelines/components/contrib/CatBoost/convert_CatBoostModel_to_AppleCoreMLModel/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_CatBoostModel_to_AppleCoreMLModel(
model_path: InputPath('CatBoostModel'),
converted_model_path: OutputPath('AppleCoreMLModel'),
):
'''Convert CatBoost model to Apple CoreML format.
Args:
model_path: Path of a trained model in binary CatBoost model format.
converted_model_path: Output path for the converted model.
Outputs:
converted_model: Model in Apple CoreML format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from catboost import CatBoost
model = CatBoost()
model.load_model(model_path)
model.save_model(
converted_model_path,
format="coreml",
# export_parameters={'prediction_type': 'probability'},
# export_parameters={'prediction_type': 'raw'},
)
if __name__ == '__main__':
create_component_from_func(
convert_CatBoostModel_to_AppleCoreMLModel,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.22'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/convert_CatBoostModel_to_AppleCoreMLModel/component.yaml",
},
)
| 635 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost | kubeflow_public_repos/pipelines/components/contrib/CatBoost/convert_CatBoostModel_to_AppleCoreMLModel/component.yaml | name: Convert CatBoostModel to AppleCoreMLModel
description: |-
Convert CatBoost model to Apple CoreML format.
Args:
model_path: Path of a trained model in binary CatBoost model format.
converted_model_path: Output path for the converted model.
Outputs:
converted_model: Model in Apple CoreML format.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: model, type: CatBoostModel}
outputs:
- {name: converted_model, type: AppleCoreMLModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/convert_CatBoostModel_to_AppleCoreMLModel/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'catboost==0.22' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'catboost==0.22' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_CatBoostModel_to_AppleCoreMLModel(
model_path,
converted_model_path,
):
'''Convert CatBoost model to Apple CoreML format.
Args:
model_path: Path of a trained model in binary CatBoost model format.
converted_model_path: Output path for the converted model.
Outputs:
converted_model: Model in Apple CoreML format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from catboost import CatBoost
model = CatBoost()
model.load_model(model_path)
model.save_model(
converted_model_path,
format="coreml",
# export_parameters={'prediction_type': 'probability'},
# export_parameters={'prediction_type': 'raw'},
)
import argparse
_parser = argparse.ArgumentParser(prog='Convert CatBoostModel to AppleCoreMLModel', description='Convert CatBoost model to Apple CoreML format.\n\n Args:\n model_path: Path of a trained model in binary CatBoost model format.\n converted_model_path: Output path for the converted model.\n\n Outputs:\n converted_model: Model in Apple CoreML format.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--converted-model", dest="converted_model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = convert_CatBoostModel_to_AppleCoreMLModel(**_parsed_args)
args:
- --model
- {inputPath: model}
- --converted-model
- {outputPath: converted_model}
| 636 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_class_probabilities | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_class_probabilities/from_CSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def catboost_predict_class_probabilities(
data_path: InputPath('CSV'),
model_path: InputPath('CatBoostModel'),
predictions_path: OutputPath(),
label_column: int = None,
):
'''Predict class probabilities with a CatBoost model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from catboost import CatBoost, Pool
import numpy
    if label_column is not None:
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
else:
column_description_path = None
eval_data = Pool(
data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoost()
model.load_model(model_path)
predictions = model.predict(eval_data, prediction_type='Probability')
numpy.savetxt(predictions_path, predictions)
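# Output format note: with prediction_type='Probability' CatBoost returns an
# (n_samples, n_classes) array, which numpy.savetxt writes as one space-separated row
# of class probabilities per sample.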
if __name__ == '__main__':
catboost_predict_class_probabilities_op = create_component_from_func(
catboost_predict_class_probabilities,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.23'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Predict_class_probabilities/from_CSV/component.yaml",
},
)
| 637 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_class_probabilities | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_class_probabilities/from_CSV/component.yaml | name: Catboost predict class probabilities
description: |-
Predict class probabilities with a CatBoost model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: CSV}
- {name: model, type: CatBoostModel}
- {name: label_column, type: Integer, optional: true}
outputs:
- {name: predictions}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Predict_class_probabilities/from_CSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'catboost==0.23' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'catboost==0.23' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def catboost_predict_class_probabilities(
data_path,
model_path,
predictions_path,
label_column = None,
):
'''Predict class probabilities with a CatBoost model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from catboost import CatBoost, Pool
import numpy
if label_column:
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
else:
column_description_path = None
eval_data = Pool(
data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoost()
model.load_model(model_path)
predictions = model.predict(eval_data, prediction_type='Probability')
numpy.savetxt(predictions_path, predictions)
import argparse
_parser = argparse.ArgumentParser(prog='Catboost predict class probabilities', description='Predict class probabilities with a CatBoost model.\n\n Args:\n data_path: Path for the data in CSV format.\n model_path: Path for the trained model in binary CatBoostModel format.\n label_column: Column containing the label data.\n predictions_path: Output path for the predictions.\n\n Outputs:\n predictions: Predictions in text format.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--label-column", dest="label_column", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--predictions", dest="predictions_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = catboost_predict_class_probabilities(**_parsed_args)
args:
- --data
- {inputPath: data}
- --model
- {inputPath: model}
- if:
cond: {isPresent: label_column}
then:
- --label-column
- {inputValue: label_column}
- --predictions
- {outputPath: predictions}
| 638 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost | kubeflow_public_repos/pipelines/components/contrib/CatBoost/convert_CatBoostModel_to_ONNX/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_CatBoostModel_to_ONNX(
model_path: InputPath('CatBoostModel'),
converted_model_path: OutputPath('ONNX'),
):
'''Convert CatBoost model to ONNX format.
Args:
model_path: Path of a trained model in binary CatBoost model format.
converted_model_path: Output path for the converted model.
Outputs:
converted_model: Model in ONNX format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from catboost import CatBoost
model = CatBoost()
model.load_model(model_path)
model.save_model(converted_model_path, format="onnx")
if __name__ == '__main__':
create_component_from_func(
convert_CatBoostModel_to_ONNX,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.22'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/convert_CatBoostModel_to_ONNX/component.yaml",
},
)
| 639 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost | kubeflow_public_repos/pipelines/components/contrib/CatBoost/convert_CatBoostModel_to_ONNX/component.yaml | name: Convert CatBoostModel to ONNX
description: |-
Convert CatBoost model to ONNX format.
Args:
model_path: Path of a trained model in binary CatBoost model format.
converted_model_path: Output path for the converted model.
Outputs:
converted_model: Model in ONNX format.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: model, type: CatBoostModel}
outputs:
- {name: converted_model, type: ONNX}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/convert_CatBoostModel_to_ONNX/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'catboost==0.22' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'catboost==0.22' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_CatBoostModel_to_ONNX(
model_path,
converted_model_path,
):
'''Convert CatBoost model to ONNX format.
Args:
model_path: Path of a trained model in binary CatBoost model format.
converted_model_path: Output path for the converted model.
Outputs:
converted_model: Model in ONNX format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from catboost import CatBoost
model = CatBoost()
model.load_model(model_path)
model.save_model(converted_model_path, format="onnx")
import argparse
_parser = argparse.ArgumentParser(prog='Convert CatBoostModel to ONNX', description='Convert CatBoost model to ONNX format.\n\n Args:\n model_path: Path of a trained model in binary CatBoost model format.\n converted_model_path: Output path for the converted model.\n\n Outputs:\n converted_model: Model in ONNX format.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--converted-model", dest="converted_model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = convert_CatBoostModel_to_ONNX(**_parsed_args)
args:
- --model
- {inputPath: model}
- --converted-model
- {outputPath: converted_model}
| 640 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Train_classifier | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Train_classifier/from_CSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def catboost_train_classifier(
training_data_path: InputPath('CSV'),
model_path: OutputPath('CatBoostModel'),
starting_model_path: InputPath('CatBoostModel') = None,
label_column: int = 0,
loss_function: str = 'Logloss',
num_iterations: int = 500,
learning_rate: float = None,
depth: int = 6,
random_seed: int = 0,
cat_features: list = None,
text_features: list = None,
additional_training_options: dict = {},
):
'''Train a CatBoost classifier model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'Logloss'
num_iterations: Number of trees to add to the ensemble.
        learning_rate: Step size shrinkage used in updates to prevent overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
text_features: A list of Text features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostClassifier
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from pathlib import Path
from catboost import CatBoostClassifier, Pool
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
train_data = Pool(
training_data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostClassifier(
iterations=num_iterations,
depth=depth,
learning_rate=learning_rate,
loss_function=loss_function,
random_seed=random_seed,
verbose=True,
**additional_training_options,
)
model.fit(
train_data,
cat_features=cat_features,
text_features=text_features,
init_model=starting_model_path,
#verbose=False,
#plot=True,
)
Path(model_path).parent.mkdir(parents=True, exist_ok=True)
model.save_model(model_path)
if __name__ == '__main__':
catboost_train_classifier_op = create_component_from_func(
catboost_train_classifier,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.23'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Train_classifier/from_CSV/component.yaml",
},
)
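

# Hypothetical usage sketch (not part of the original component).
# A minimal illustration of wiring this component into a KFP v1 pipeline.
# The download-component URL, the dataset URL and the parameter values below are
# assumptions for illustration only; they are not defined by this repository.
def _example_pipeline():
    from kfp import components

    download_op = components.load_component_from_url(
        # Assumed location of the generic "Download" component; adjust as needed.
        'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/web/Download/component.yaml'
    )
    train_op = create_component_from_func(
        catboost_train_classifier,
        base_image='python:3.7',
        packages_to_install=['catboost==0.23'],
    )

    training_data = download_op('https://example.com/training_data.csv').output
    train_op(
        training_data=training_data,
        label_column=0,
        num_iterations=200,
    )
# The sketch above could be submitted with, for example,
# kfp.Client(host=...).create_run_from_pipeline_func(_example_pipeline, arguments={}).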
| 641 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Train_classifier | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Train_classifier/from_CSV/component.yaml | name: Catboost train classifier
description: |-
Train a CatBoost classifier model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'Logloss'
num_iterations: Number of trees to add to the ensemble.
learning_rate: Step size shrinkage used in update to prevents overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
text_features: A list of Text features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostClassifier
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: training_data, type: CSV}
- {name: starting_model, type: CatBoostModel, optional: true}
- {name: label_column, type: Integer, default: '0', optional: true}
- {name: loss_function, type: String, default: Logloss, optional: true}
- {name: num_iterations, type: Integer, default: '500', optional: true}
- {name: learning_rate, type: Float, optional: true}
- {name: depth, type: Integer, default: '6', optional: true}
- {name: random_seed, type: Integer, default: '0', optional: true}
- {name: cat_features, type: JsonArray, optional: true}
- {name: text_features, type: JsonArray, optional: true}
- {name: additional_training_options, type: JsonObject, default: '{}', optional: true}
outputs:
- {name: model, type: CatBoostModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Train_classifier/from_CSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'catboost==0.23' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'catboost==0.23' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def catboost_train_classifier(
training_data_path,
model_path,
starting_model_path = None,
label_column = 0,
loss_function = 'Logloss',
num_iterations = 500,
learning_rate = None,
depth = 6,
random_seed = 0,
cat_features = None,
text_features = None,
additional_training_options = {},
):
'''Train a CatBoost classifier model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'Logloss'
num_iterations: Number of trees to add to the ensemble.
learning_rate: Step size shrinkage used in update to prevents overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
text_features: A list of Text features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostClassifier
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from pathlib import Path
from catboost import CatBoostClassifier, Pool
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
train_data = Pool(
training_data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostClassifier(
iterations=num_iterations,
depth=depth,
learning_rate=learning_rate,
loss_function=loss_function,
random_seed=random_seed,
verbose=True,
**additional_training_options,
)
model.fit(
train_data,
cat_features=cat_features,
text_features=text_features,
init_model=starting_model_path,
#verbose=False,
#plot=True,
)
Path(model_path).parent.mkdir(parents=True, exist_ok=True)
model.save_model(model_path)
import json
import argparse
_parser = argparse.ArgumentParser(prog='Catboost train classifier', description="Train a CatBoost classifier model.\n\n Args:\n training_data_path: Path for the training data in CSV format.\n model_path: Output path for the trained model in binary CatBoostModel format.\n starting_model_path: Path for the existing trained model to start from.\n label_column: Column containing the label data.\n\n loss_function: The metric to use in training and also selector of the machine learning\n problem to solve. Default = 'Logloss'\n num_iterations: Number of trees to add to the ensemble.\n learning_rate: Step size shrinkage used in update to prevents overfitting.\n Default value is selected automatically for binary classification with other parameters set to default.\n In all other cases default is 0.03.\n depth: Depth of a tree. All trees are the same depth. Default = 6\n random_seed: Random number seed. Default = 0\n\n cat_features: A list of Categorical features (indices or names).\n text_features: A list of Text features (indices or names).\n additional_training_options: A dictionary with additional options to pass to CatBoostClassifier\n\n Outputs:\n model: Trained model in binary CatBoostModel format.\n\n Annotations:\n author: Alexey Volkov <[email protected]>")
_parser.add_argument("--training-data", dest="training_data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--starting-model", dest="starting_model_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--label-column", dest="label_column", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--loss-function", dest="loss_function", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--num-iterations", dest="num_iterations", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--learning-rate", dest="learning_rate", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--depth", dest="depth", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--random-seed", dest="random_seed", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--cat-features", dest="cat_features", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--text-features", dest="text_features", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--additional-training-options", dest="additional_training_options", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = catboost_train_classifier(**_parsed_args)
args:
- --training-data
- {inputPath: training_data}
- if:
cond: {isPresent: starting_model}
then:
- --starting-model
- {inputPath: starting_model}
- if:
cond: {isPresent: label_column}
then:
- --label-column
- {inputValue: label_column}
- if:
cond: {isPresent: loss_function}
then:
- --loss-function
- {inputValue: loss_function}
- if:
cond: {isPresent: num_iterations}
then:
- --num-iterations
- {inputValue: num_iterations}
- if:
cond: {isPresent: learning_rate}
then:
- --learning-rate
- {inputValue: learning_rate}
- if:
cond: {isPresent: depth}
then:
- --depth
- {inputValue: depth}
- if:
cond: {isPresent: random_seed}
then:
- --random-seed
- {inputValue: random_seed}
- if:
cond: {isPresent: cat_features}
then:
- --cat-features
- {inputValue: cat_features}
- if:
cond: {isPresent: text_features}
then:
- --text-features
- {inputValue: text_features}
- if:
cond: {isPresent: additional_training_options}
then:
- --additional-training-options
- {inputValue: additional_training_options}
- --model
- {outputPath: model}
| 642 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_classes | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_classes/from_CSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def catboost_predict_classes(
data_path: InputPath('CSV'),
model_path: InputPath('CatBoostModel'),
predictions_path: OutputPath(),
label_column: int = None,
):
'''Predict classes using the CatBoost classifier model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Class predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from catboost import CatBoostClassifier, Pool
import numpy
    if label_column is not None:
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
else:
column_description_path = None
eval_data = Pool(
data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostClassifier()
model.load_model(model_path)
predictions = model.predict(eval_data)
numpy.savetxt(predictions_path, predictions, fmt='%s')
if __name__ == '__main__':
catboost_predict_classes_op = create_component_from_func(
catboost_predict_classes,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.22'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Predict_classes/from_CSV/component.yaml",
},
)
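

# Hypothetical local smoke-test sketch (not part of the original component).
# Calls the function above directly, outside of KFP, assuming catboost and numpy
# are installed and that the file names below (all placeholders) exist locally.
def _example_local_run():
    catboost_predict_classes(
        data_path='eval_data.csv',           # comma-delimited CSV with a header row, features only
        model_path='model.cbm',              # model produced by the training component
        predictions_path='predictions.txt',  # one predicted class per line is written here
    )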
| 643 |
0 | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_classes | kubeflow_public_repos/pipelines/components/contrib/CatBoost/Predict_classes/from_CSV/component.yaml | name: Catboost predict classes
description: |-
Predict classes using the CatBoost classifier model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Class predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: CSV}
- {name: model, type: CatBoostModel}
- {name: label_column, type: Integer, optional: true}
outputs:
- {name: predictions}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Predict_classes/from_CSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'catboost==0.22' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'catboost==0.22' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def catboost_predict_classes(
data_path,
model_path,
predictions_path,
label_column = None,
):
'''Predict classes using the CatBoost classifier model.
Args:
data_path: Path for the data in CSV format.
model_path: Path for the trained model in binary CatBoostModel format.
label_column: Column containing the label data.
predictions_path: Output path for the predictions.
Outputs:
predictions: Class predictions in text format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
import tempfile
from catboost import CatBoostClassifier, Pool
import numpy
if label_column:
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
else:
column_description_path = None
eval_data = Pool(
data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostClassifier()
model.load_model(model_path)
predictions = model.predict(eval_data)
numpy.savetxt(predictions_path, predictions, fmt='%s')
import argparse
_parser = argparse.ArgumentParser(prog='Catboost predict classes', description='Predict classes using the CatBoost classifier model.\n\n Args:\n data_path: Path for the data in CSV format.\n model_path: Path for the trained model in binary CatBoostModel format.\n label_column: Column containing the label data.\n predictions_path: Output path for the predictions.\n\n Outputs:\n predictions: Class predictions in text format.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--label-column", dest="label_column", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--predictions", dest="predictions_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = catboost_predict_classes(**_parsed_args)
args:
- --data
- {inputPath: data}
- --model
- {inputPath: model}
- if:
cond: {isPresent: label_column}
then:
- --label-column
- {inputValue: label_column}
- --predictions
- {outputPath: predictions}
| 644 |
0 | kubeflow_public_repos/pipelines/components/contrib/notebooks | kubeflow_public_repos/pipelines/components/contrib/notebooks/samples/sample_pipeline.py | kfp_endpoint = None
import kfp
from kfp import components
download_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/240543e483076ae718f82c6f280441daa2f041fd/components/web/Download/component.yaml')
run_notebook_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/4ebce5f643b6af5639053ea7eaed52b02bf7e928/components/notebooks/Run_notebook_using_papermill/component.yaml')
def notebook_pipeline():
notebook = download_op('https://raw.githubusercontent.com/kubeflow/pipelines/93fc34474bf989998cf19445149aca2847eee763/components/notebooks/samples/test_notebook.ipynb').output
run_notebook_op(
notebook=notebook,
parameters={'param1': 'value 1'},
input_data="Optional. Pass output of any component here. Can be a directory.",
packages_to_install=["matplotlib"],
)
if __name__ == '__main__':
    pipeline_run = kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(notebook_pipeline, arguments={})
| 645 |
0 | kubeflow_public_repos/pipelines/components/contrib/notebooks | kubeflow_public_repos/pipelines/components/contrib/notebooks/samples/test_notebook.ipynb | # Parameters
INPUT_DATA_PATH = INPUT_DATA_PATH or ""
OUTPUT_DATA_PATH = OUTPUT_DATA_PATH or ""

# Show the parameter values
print('INPUT_DATA_PATH = ' + INPUT_DATA_PATH)
print('OUTPUT_DATA_PATH = ' + OUTPUT_DATA_PATH)
print('locals() = ' + str(locals()))

# Checking the input data
import os
if INPUT_DATA_PATH:
if os.path.isdir(INPUT_DATA_PATH):
print('os.listdir(INPUT_DATA_PATH):')
print(os.listdir(INPUT_DATA_PATH))
if os.path.isfile(INPUT_DATA_PATH):
print('os.stat(INPUT_DATA_PATH):')
print(os.stat(INPUT_DATA_PATH))
else:
    print('INPUT_DATA_PATH is empty')

# Writing some output data
from pathlib import Path
(Path(OUTPUT_DATA_PATH) / 'output.txt').write_text("Hello world!") | 646 |
0 | kubeflow_public_repos/pipelines/components/contrib/notebooks | kubeflow_public_repos/pipelines/components/contrib/notebooks/Run_notebook_using_papermill/component.yaml | name: Run notebook using papermill
description: |
Run Jupyter notebook using papermill.
The notebook will receive the parameter values passed to it as well as the INPUT_DATA_PATH and OUTPUT_DATA_PATH variables that will be set to the input data path (if provided) and directory for the optional output data.
inputs:
- {name: Notebook, type: JupyterNotebook, description: 'Notebook to execute.'}
- {name: Parameters, type: JsonObject, default: '{}', description: 'Map with notebook parameter values.'}
- {name: Packages to install, type: JsonArray, default: '', description: 'Python packages to install'}
- {name: Input data, optional: true, description: 'Optional data that can be passed to notebook. In notebook, the INPUT_DATA_PATH variable will point to the data (if passed).'}
outputs:
- {name: Notebook, type: JupyterNotebook, description: 'Executed notebook.'}
- {name: Output data, description: 'Directory with any output data. In notebook, the OUTPUT_DATA_PATH variable will point to this directory, so that the notebook can write output data there.'}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/notebooks/Run_notebook_using_papermill/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -exc
- |
input_notebook_path="$0"
output_notebook_path="$1"
arguments="$2"
packages_to_install="$3"
input_data_path="$4"
output_data_path="$5"
mkdir -p "$(dirname "$output_notebook_path")"
mkdir -p "$output_data_path"
# Converting packages_to_install from JSON to command-line arguments
packages_to_install=$(echo "$packages_to_install" | sed -E -e 's/^\[//' -e 's/]$//' -e 's/",/" /g' -e "s/\"/'/g")
# Installing packages
sh -c "python3 -m pip install --upgrade --quiet jupyter papermill==2.2.0 ${packages_to_install}"
# Running the notebook using papermill
papermill --parameters_yaml "$arguments" --parameters INPUT_DATA_PATH "$input_data_path" --parameters OUTPUT_DATA_PATH "$output_data_path" "$input_notebook_path" "$output_notebook_path"
- {inputPath: Notebook}
- {outputPath: Notebook}
- {inputValue: Parameters}
- if:
cond: {isPresent: Packages to install}
then: [{inputValue: Packages to install}]
else: "{}"
- if:
cond: {isPresent: Input data}
then: [{inputPath: Input data}]
else: ""
- {outputPath: Output data}
| 647 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample | kubeflow_public_repos/pipelines/components/contrib/sample/Python_script/component.yaml | name: Filter text
inputs:
- {name: Text}
- {name: Pattern, default: '.*'}
outputs:
- {name: Filtered text}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/sample/Python_script/component.yaml'
implementation:
container:
image: python:3.8
command:
- sh
- -ec
- |
# This is how additional packages can be installed dynamically
python3 -m pip install pip six
# Run the rest of the command after installing the packages.
"$0" "$@"
- python3
- -u # Auto-flush. We want the logs to appear in the console immediately.
    - -c # Inline scripts are easy, but have size limitations and the error traces do not show source lines.
- |
import os
import re
import sys
text_path = sys.argv[1]
pattern = sys.argv[2]
filtered_text_path = sys.argv[3]
regex = re.compile(pattern)
os.makedirs(os.path.dirname(filtered_text_path), exist_ok=True)
with open(text_path, 'r') as reader:
with open(filtered_text_path, 'w') as writer:
for line in reader:
if regex.search(line):
writer.write(line)
- {inputPath: Text}
- {inputValue: Pattern}
- {outputPath: Filtered text}
| 648 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample | kubeflow_public_repos/pipelines/components/contrib/sample/C#_script/component.yaml | name: Filter text
inputs:
- {name: Text}
- {name: Pattern, default: '.*'}
outputs:
- {name: Filtered text}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/sample/C#_script/component.yaml'
implementation:
container:
image: mcr.microsoft.com/dotnet/sdk:5.0
command:
- sh
- -ec
- |
dotnet tool install dotnet-script --tool-path /usr/bin
"$0" "$@"
- dotnet
- script
- eval
- |
string textPath = Args[0];
string pattern = Args[1];
string filteredTextPath = Args[2];
var regex = new System.Text.RegularExpressions.Regex(pattern);
Directory.CreateDirectory(Path.GetDirectoryName(filteredTextPath));
using(var writer = new StreamWriter(filteredTextPath)) {
foreach (var line in File.ReadLines(textPath)) {
if (regex.IsMatch(line)) {
writer.WriteLine(line);
}
}
}
- --
- {inputPath: Text}
- {inputValue: Pattern}
- {outputPath: Filtered text}
| 649 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample | kubeflow_public_repos/pipelines/components/contrib/sample/R_script/component.yaml | name: Filter text
inputs:
- {name: Text}
- {name: Pattern, default: '.*'}
outputs:
- {name: Filtered text}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/sample/R_script/component.yaml'
implementation:
container:
image: r-base:4.0.2
command:
- Rscript
- -e
- |
args <- commandArgs(trailingOnly = TRUE)
textPath <- args[1]
pattern <- args[2]
filteredTextPath <- args[3]
dir.create(dirname(filteredTextPath), showWarnings = FALSE, recursive = TRUE)
inputFile = file(textPath, "r")
outputFile = file(filteredTextPath, "w")
while ( TRUE ) {
lines = readLines(inputFile, n = 1)
if ( length(lines) == 0 ) {
break
}
if ( grepl(pattern = pattern, lines) ) {
writeLines(lines, outputFile)
}
}
close(outputFile)
close(inputFile)
- {inputPath: Text}
- {inputValue: Pattern}
- {outputPath: Filtered text}
| 650 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/README.md | # Keras - Train classifier
### Trains classifier using Keras sequential model
## Inputs
|Name|Type|Default|Description|
|---|---|---|---|
|training_set_features_path|GcsPath: {data_type: TSV}||Local or GCS path to the training set features table.|
|training_set_labels_path|GcsPath: {data_type: TSV}||Local or GCS path to the training set labels (each label is a class index from 0 to num-classes - 1).|
|output_model_uri|GcsPath: {data_type: Keras model}||Local or GCS path specifying where to save the trained model. The model (topology + weights + optimizer state) is saved in HDF5 format and can be loaded back by calling keras.models.load_model|
|model_config|GcsPath: {data_type: Keras model config json}||JSON string containing the serialized model structure. Can be obtained by calling model.to_json() on a Keras model.|
|number_of_classes|Integer||Number of classifier classes.|
|number_of_epochs|Integer|100|Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided.|
|batch_size|Integer|32|Number of samples per gradient update|
## Outputs
|Name|Type|Default|Description|
|---|---|---|---|
|output_model_uri|GcsPath: {data_type: Keras model}||GCS path where the trained model has been saved. The model (topology + weights + optimizer state) is saved in HDF5 format and can be loaded back by calling keras.models.load_model|
## Container image
gcr.io/ml-pipeline/sample/keras/train_classifier
## Usage:
```python
import os
from pathlib import Path
import requests
import kfp
from tensorflow import gfile  # needed for gfile.Copy below (TensorFlow 1.x)
component_url_prefix = 'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/sample/keras/train_classifier/'
test_data_url_prefix = component_url_prefix + 'tests/testdata/'
#Prepare input/output paths and data
input_data_gcs_dir = 'gs://<my bucket>/<path>/'
output_data_gcs_dir = 'gs://<my bucket>/<path>/'
#Downloading the training set (to upload to GCS later)
training_set_features_local_path = os.path.join('.', 'training_set_features.tsv')
training_set_labels_local_path = os.path.join('.', 'training_set_labels.tsv')
training_set_features_url = test_data_url_prefix + 'training_set_features.tsv'
training_set_labels_url = test_data_url_prefix + 'training_set_labels.tsv'
Path(training_set_features_local_path).write_bytes(requests.get(training_set_features_url).content)
Path(training_set_labels_local_path).write_bytes(requests.get(training_set_labels_url).content)
#Uploading the data to GCS where it can be read by the trainer
training_set_features_gcs_path = os.path.join(input_data_gcs_dir, 'training_set_features.tsv')
training_set_labels_gcs_path = os.path.join(input_data_gcs_dir, 'training_set_labels.tsv')
gfile.Copy(training_set_features_local_path, training_set_features_gcs_path)
gfile.Copy(training_set_labels_local_path, training_set_labels_gcs_path)
output_model_uri_template = os.path.join(output_data_gcs_dir, kfp.dsl.EXECUTION_ID_PLACEHOLDER, 'output_model_uri', 'data')
xor_model_config = requests.get(test_data_url_prefix + 'model_config.json').text
#Load the component
train_op = kfp.components.load_component_from_url(component_url_prefix + 'component.yaml')
#Use the component as part of the pipeline
@kfp.dsl.pipeline(name='Test keras/train_classifier', description='Pipeline to test keras/train_classifier component')
def pipeline_to_test_keras_train_classifier():
train_task = train_op(
training_set_features_path=training_set_features_gcs_path,
training_set_labels_path=training_set_labels_gcs_path,
output_model_uri=output_model_uri_template,
model_config=xor_model_config,
number_of_classes=2,
number_of_epochs=10,
batch_size=32,
)
#Use train_task.outputs['output_model_uri'] to obtain the reference to the trained model URI that can be a passed to other pipeline tasks (e.g. for prediction or analysis)
```
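
After the run completes, the trained model can be inspected locally. A minimal sketch, assuming the HDF5 model file has been copied from GCS to a local path (the path below is a placeholder):

```python
import numpy as np
import keras

model = keras.models.load_model('./output_model/data')  # local copy of the trained HDF5 model
features = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype='float32') / 255  # same scaling as training
print(model.predict(features))
```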
| 651 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/build_image.sh | #!/bin/bash -e
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
image_name=gcr.io/ml-pipeline/sample/keras/train_classifier
image_tag=latest
full_image_name=${image_name}:${image_tag}
base_image_tag=1.12.0-py3
cd "$(dirname "$0")"
docker build --build-arg BASE_IMAGE_TAG=$base_image_tag -t "$full_image_name" .
docker push "$full_image_name"
#Output the strict image name (which contains the sha256 image digest)
#This name can be used by the subsequent steps to refer to the exact image that was built even if another image with the same name was pushed.
image_name_with_digest=$(docker inspect --format="{{index .RepoDigests 0}}" "$full_image_name")
strict_image_name_output_file=./versions/image_digests_for_tags/$image_tag
mkdir -p "$(dirname "$strict_image_name_output_file")"
echo $image_name_with_digest | tee "$strict_image_name_output_file"
| 652 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/requirements.txt | keras
| 653 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/run_tests.sh | #!/bin/bash -e
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cd $(dirname $0)
python3 -m unittest discover --verbose --start-dir tests --top-level-directory=..
| 654 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/component.yaml | name: Keras - Train classifier
description: Trains classifier using Keras sequential model
inputs:
- {name: training_set_features_path, type: {GcsPath: {data_type: TSV}}, description: 'Local or GCS path to the training set features table.'}
- {name: training_set_labels_path, type: {GcsPath: {data_type: TSV}}, description: 'Local or GCS path to the training set labels (each label is a class index from 0 to num-classes - 1).'}
- {name: output_model_uri, type: {GcsPath: {data_type: Keras model}}, description: 'Local or GCS path specifying where to save the trained model. The model (topology + weights + optimizer state) is saved in HDF5 format and can be loaded back by calling keras.models.load_model'} #Remove GcsUri and move to outputs once artifact passing support is checked in.
- {name: model_config, type: {GcsPath: {data_type: Keras model config json}}, description: 'JSON string containing the serialized model structure. Can be obtained by calling model.to_json() on a Keras model.'}
- {name: number_of_classes, type: Integer, description: 'Number of classifier classes.'}
- {name: number_of_epochs, type: Integer, default: '100', description: 'Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided.'}
- {name: batch_size, type: Integer, default: '32', description: 'Number of samples per gradient update.'}
outputs:
- {name: output_model_uri, type: {GcsPath: {data_type: Keras model}}, description: 'GCS path where the trained model has been saved. The model (topology + weights + optimizer state) is saved in HDF5 format and can be loaded back by calling keras.models.load_model'} #Remove GcsUri and make it a proper output once artifact passing support is checked in.
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/sample/keras/train_classifier/component.yaml'
implementation:
container:
image: gcr.io/ml-pipeline/sample/keras/train_classifier
command: [python3, /pipelines/component/src/train.py]
args: [
--training-set-features-path, {inputValue: training_set_features_path},
--training-set-labels-path, {inputValue: training_set_labels_path},
--output-model-path, {inputValue: output_model_uri},
--model-config-json, {inputValue: model_config},
--num-classes, {inputValue: number_of_classes},
--num-epochs, {inputValue: number_of_epochs},
--batch-size, {inputValue: batch_size},
--output-model-path-file, {outputPath: output_model_uri},
]
| 655 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/Dockerfile | ARG BASE_IMAGE_TAG=1.12.0-py3
FROM tensorflow/tensorflow:$BASE_IMAGE_TAG
COPY requirements.txt .
RUN python3 -m pip install -r \
requirements.txt --quiet --no-cache-dir \
&& rm -f requirements.txt
COPY ./src /pipelines/component/src
ENTRYPOINT python3 /pipelines/component/src/train.py
| 656 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/tests/test_component.py | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import tempfile
import unittest
from contextlib import contextmanager
from pathlib import Path
import kfp.components as comp
@contextmanager
def components_local_output_dir_context(output_dir: str):
old_dir = comp._components._outputs_dir
try:
comp._components._outputs_dir = output_dir
yield output_dir
finally:
comp._components._outputs_dir = old_dir
class KerasTrainClassifierTestCase(unittest.TestCase):
def test_handle_training_xor(self):
tests_root = os.path.abspath(os.path.dirname(__file__))
component_root = os.path.abspath(os.path.join(tests_root, '..'))
testdata_root = os.path.abspath(os.path.join(tests_root, 'testdata'))
train_op = comp.load_component(os.path.join(component_root, 'component.yaml'))
with tempfile.TemporaryDirectory() as temp_dir_name:
with components_local_output_dir_context(temp_dir_name):
train_task = train_op(
training_set_features_path=os.path.join(testdata_root, 'training_set_features.tsv'),
training_set_labels_path=os.path.join(testdata_root, 'training_set_labels.tsv'),
output_model_uri=os.path.join(temp_dir_name, 'outputs/output_model/data'),
model_config=Path(testdata_root).joinpath('model_config.json').read_text(),
number_of_classes=2,
number_of_epochs=10,
batch_size=32,
)
full_command = train_task.command + train_task.arguments
full_command[0] = 'python'
full_command[1] = os.path.join(component_root, 'src', 'train.py')
process = subprocess.run(full_command)
(output_model_uri_file, ) = (train_task.file_outputs['output-model-uri'], )
output_model_uri = Path(output_model_uri_file).read_text()
if __name__ == '__main__':
unittest.main()
| 657 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/tests | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/tests/testdata/training_set_features.tsv | 0 0
0 1
1 0
1 1
| 658 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/tests | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/tests/testdata/model_config.json | {
"class_name": "Sequential",
"config": {
"name": "sequential_1",
"layers": [
{
"class_name": "Dense",
"config": {
"name": "dense_1",
"trainable": true,
"units": 2,
"activation": "linear",
"use_bias": true,
"kernel_initializer": {
"class_name": "VarianceScaling",
"config": {
"scale": 1.0,
"mode": "fan_avg",
"distribution": "uniform",
"seed": null
}
},
"bias_initializer": {
"class_name": "Zeros",
"config": {}
},
"kernel_regularizer": null,
"bias_regularizer": null,
"activity_regularizer": null,
"kernel_constraint": null,
"bias_constraint": null
}
},
{
"class_name": "Dense",
"config": {
"name": "dense_2",
"trainable": true,
"units": 2,
"activation": "linear",
"use_bias": true,
"kernel_initializer": {
"class_name": "VarianceScaling",
"config": {
"scale": 1.0,
"mode": "fan_avg",
"distribution": "uniform",
"seed": null
}
},
"bias_initializer": {
"class_name": "Zeros",
"config": {}
},
"kernel_regularizer": null,
"bias_regularizer": null,
"activity_regularizer": null,
"kernel_constraint": null,
"bias_constraint": null
}
}
]
},
"keras_version": "2.2.4",
"backend": "tensorflow"
} | 659 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/tests | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/tests/testdata/training_set_labels.tsv | 0
1
1
0
| 660 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier | kubeflow_public_repos/pipelines/components/contrib/sample/keras/train_classifier/src/train.py | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from pathlib import Path
import keras
import numpy as np
parser = argparse.ArgumentParser(description='Train classifier model using Keras')
parser.add_argument('--training-set-features-path', type=str, help='Local or GCS path to the training set features table.')
parser.add_argument('--training-set-labels-path', type=str, help='Local or GCS path to the training set labels (each label is a class index from 0 to num-classes - 1).')
parser.add_argument('--output-model-path', type=str, help='Local or GCS path specifying where to save the trained model. The model (topology + weights + optimizer state) is saved in HDF5 format and can be loaded back by calling keras.models.load_model')
parser.add_argument('--model-config-json', type=str, help='JSON string containing the serialized model structure. Can be obtained by calling model.to_json() on a Keras model.')
parser.add_argument('--num-classes', type=int, help='Number of classifier classes.')
parser.add_argument('--num-epochs', type=int, default=100, help='Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided.')
parser.add_argument('--batch-size', type=int, default=32, help='Number of samples per gradient update.')
parser.add_argument('--output-model-path-file', type=str, help='Path to a local file containing the output model URI. Needed for data passing until the artifact support is checked in.') #TODO: Remove after the team agrees to let me check in artifact support.
args = parser.parse_args()
# The data, split between train and test sets:
#(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = np.loadtxt(args.training_set_features_path)
y_train = np.loadtxt(args.training_set_labels_path)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, args.num_classes)
model = keras.models.model_from_json(args.model_config_json)
model.add(keras.layers.Activation('softmax'))
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
x_train = x_train.astype('float32')
x_train /= 255
model.fit(
x_train,
y_train,
batch_size=args.batch_size,
epochs=args.num_epochs,
shuffle=True
)
# Save model and weights
if not args.output_model_path.startswith('gs://'):
save_dir = os.path.dirname(args.output_model_path)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
model.save(args.output_model_path)
print('Saved trained model at %s ' % args.output_model_path)
Path(args.output_model_path_file).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_model_path_file).write_text(args.output_model_path)
| 661 |
0 | kubeflow_public_repos/pipelines/components/contrib/sample | kubeflow_public_repos/pipelines/components/contrib/sample/Shell_script/component.yaml | name: Filter text using shell and grep
inputs:
- {name: Text}
- {name: Pattern, default: '.*'}
outputs:
- {name: Filtered text}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/sample/Shell_script/component.yaml'
implementation:
container:
image: alpine
command:
- sh
- -ec
- |
text_path=$0
pattern=$1
filtered_text_path=$2
mkdir -p "$(dirname "$filtered_text_path")"
grep "$pattern" < "$text_path" > "$filtered_text_path"
- {inputPath: Text}
- {inputValue: Pattern}
- {outputPath: Filtered text}
| 662 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/create_model_for_tables/component.py | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
def automl_create_model_for_tables(
gcp_project_id: str,
gcp_region: str,
display_name: str,
dataset_id: str,
target_column_path: str = None,
input_feature_column_paths: list = None,
optimization_objective: str = 'MAXIMIZE_AU_PRC',
train_budget_milli_node_hours: int = 1000,
) -> NamedTuple('Outputs', [('model_path', str), ('model_id', str), ('model_page_url', 'URI'),]):
from google.cloud import automl
client = automl.AutoMlClient()
location_path = client.location_path(gcp_project_id, gcp_region)
model_dict = {
'display_name': display_name,
'dataset_id': dataset_id,
'tables_model_metadata': {
'target_column_spec': automl.types.ColumnSpec(name=target_column_path),
'input_feature_column_specs': [automl.types.ColumnSpec(name=path) for path in input_feature_column_paths] if input_feature_column_paths else None,
'optimization_objective': optimization_objective,
'train_budget_milli_node_hours': train_budget_milli_node_hours,
},
}
create_model_response = client.create_model(location_path, model_dict)
print('Create model operation: {}'.format(create_model_response.operation))
result = create_model_response.result()
print(result)
model_name = result.name
model_id = model_name.rsplit('/', 1)[-1]
model_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id};modelId={model_id};task=basic/train?project={project_id}'.format(
project_id=gcp_project_id,
region=gcp_region,
dataset_id=dataset_id,
model_id=model_id,
)
return (model_name, model_id, model_url)
if __name__ == '__main__':
from kfp.components import create_component_from_func
automl_create_model_for_tables_op = create_component_from_func(
automl_create_model_for_tables,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['google-cloud-automl==0.4.0'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/create_model_for_tables/component.yaml",
},
)
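

# Hypothetical usage sketch (not part of the original component).
# Shows how the generated op could be used inside a KFP pipeline. The project id,
# region, display name and dataset id below are placeholders, not real resources.
def _example_pipeline():
    from kfp.components import create_component_from_func

    create_model_op = create_component_from_func(
        automl_create_model_for_tables,
        base_image='python:3.7',
        packages_to_install=['google-cloud-automl==0.4.0'],
    )
    create_model_task = create_model_op(
        gcp_project_id='my-project',          # placeholder
        gcp_region='us-central1',             # placeholder
        display_name='my-tables-model',
        dataset_id='TBL0000000000000000000',  # placeholder AutoML Tables dataset id
        train_budget_milli_node_hours=1000,
    )
    # Downstream steps could consume create_model_task.outputs['model_path'].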
| 663 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/create_model_for_tables/component.yaml | name: Automl create model for tables
inputs:
- {name: gcp_project_id, type: String}
- {name: gcp_region, type: String}
- {name: display_name, type: String}
- {name: dataset_id, type: String}
- {name: target_column_path, type: String, optional: true}
- {name: input_feature_column_paths, type: JsonArray, optional: true}
- {name: optimization_objective, type: String, default: MAXIMIZE_AU_PRC, optional: true}
- {name: train_budget_milli_node_hours, type: Integer, default: '1000', optional: true}
outputs:
- {name: model_path, type: String}
- {name: model_id, type: String}
- {name: model_page_url, type: URI}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/create_model_for_tables/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'google-cloud-automl==0.4.0' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip
install --quiet --no-warn-script-location 'google-cloud-automl==0.4.0' --user)
&& "$0" "$@"
- python3
- -u
- -c
- |
def automl_create_model_for_tables(
gcp_project_id ,
gcp_region ,
display_name ,
dataset_id ,
target_column_path = None,
input_feature_column_paths = None,
optimization_objective = 'MAXIMIZE_AU_PRC',
train_budget_milli_node_hours = 1000,
) :
from google.cloud import automl
client = automl.AutoMlClient()
location_path = client.location_path(gcp_project_id, gcp_region)
model_dict = {
'display_name': display_name,
'dataset_id': dataset_id,
'tables_model_metadata': {
'target_column_spec': automl.types.ColumnSpec(name=target_column_path),
'input_feature_column_specs': [automl.types.ColumnSpec(name=path) for path in input_feature_column_paths] if input_feature_column_paths else None,
'optimization_objective': optimization_objective,
'train_budget_milli_node_hours': train_budget_milli_node_hours,
},
}
create_model_response = client.create_model(location_path, model_dict)
print('Create model operation: {}'.format(create_model_response.operation))
result = create_model_response.result()
print(result)
model_name = result.name
model_id = model_name.rsplit('/', 1)[-1]
model_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id};modelId={model_id};task=basic/train?project={project_id}'.format(
project_id=gcp_project_id,
region=gcp_region,
dataset_id=dataset_id,
model_id=model_id,
)
return (model_name, model_id, model_url)
def _serialize_str(str_value: str) -> str:
if not isinstance(str_value, str):
raise TypeError('Value "{}" has type "{}" instead of str.'.format(str(str_value), str(type(str_value))))
return str_value
import json
import argparse
_parser = argparse.ArgumentParser(prog='Automl create model for tables', description='')
_parser.add_argument("--gcp-project-id", dest="gcp_project_id", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--gcp-region", dest="gcp_region", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--display-name", dest="display_name", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--dataset-id", dest="dataset_id", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--target-column-path", dest="target_column_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--input-feature-column-paths", dest="input_feature_column_paths", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--optimization-objective", dest="optimization_objective", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--train-budget-milli-node-hours", dest="train_budget_milli_node_hours", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=3)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_create_model_for_tables(**_parsed_args)
_output_serializers = [
_serialize_str,
_serialize_str,
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --gcp-project-id
- {inputValue: gcp_project_id}
- --gcp-region
- {inputValue: gcp_region}
- --display-name
- {inputValue: display_name}
- --dataset-id
- {inputValue: dataset_id}
- if:
cond: {isPresent: target_column_path}
then:
- --target-column-path
- {inputValue: target_column_path}
- if:
cond: {isPresent: input_feature_column_paths}
then:
- --input-feature-column-paths
- {inputValue: input_feature_column_paths}
- if:
cond: {isPresent: optimization_objective}
then:
- --optimization-objective
- {inputValue: optimization_objective}
- if:
cond: {isPresent: train_budget_milli_node_hours}
then:
- --train-budget-milli-node-hours
- {inputValue: train_budget_milli_node_hours}
- '----output-paths'
- {outputPath: model_path}
- {outputPath: model_id}
- {outputPath: model_page_url}
| 664 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/prediction_service_batch_predict/component.py | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
def automl_prediction_service_batch_predict(
model_path,
gcs_input_uris: list = None,
gcs_output_uri_prefix: str = None,
bq_input_uri: str = None,
bq_output_uri: str = None,
params=None,
retry=None, #google.api_core.gapic_v1.method.DEFAULT,
timeout=None, #google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = None,
) -> NamedTuple('Outputs', [('gcs_output_directory', str), ('bigquery_output_dataset', str)]):
import sys
import subprocess
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0', '--quiet', '--no-warn-script-location'], env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
input_config = {}
if gcs_input_uris:
input_config['gcs_source'] = {'input_uris': gcs_input_uris}
if bq_input_uri:
input_config['bigquery_source'] = {'input_uri': bq_input_uri}
output_config = {}
if gcs_output_uri_prefix:
output_config['gcs_destination'] = {'output_uri_prefix': gcs_output_uri_prefix}
if bq_output_uri:
output_config['bigquery_destination'] = {'output_uri': bq_output_uri}
from google.cloud import automl
client = automl.PredictionServiceClient()
response = client.batch_predict(
model_path,
input_config,
output_config,
params,
retry,
timeout,
metadata,
)
print('Operation started:')
print(response.operation)
result = response.result()
metadata = response.metadata
print('Operation finished:')
print(metadata)
output_info = metadata.batch_predict_details.output_info
# Workaround for Argo issue - it fails when output is empty: https://github.com/argoproj/argo-workflows/pull/1277/files#r326028422
return (output_info.gcs_output_directory or '-', output_info.bigquery_output_dataset or '-')
if __name__ == '__main__':
from kfp.components import create_component_from_func
automl_prediction_service_batch_predict_op = create_component_from_func(
automl_prediction_service_batch_predict,
output_component_file='component.yaml',
base_image='python:3.7',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/prediction_service_batch_predict/component.yaml",
},
)
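

# Hypothetical usage sketch (not part of the original component).
# Minimal illustration of a batch-prediction step. The model resource name and the
# GCS URIs below are placeholders; a real run needs existing AutoML resources.
def _example_pipeline():
    from kfp.components import create_component_from_func

    batch_predict_op = create_component_from_func(
        automl_prediction_service_batch_predict,
        base_image='python:3.7',
    )
    predict_task = batch_predict_op(
        model_path='projects/my-project/locations/us-central1/models/TBL0000000000000000000',
        gcs_input_uris=['gs://my-bucket/batch_input/data.csv'],
        gcs_output_uri_prefix='gs://my-bucket/batch_output/',
    )
    # predict_task.outputs['gcs_output_directory'] points at the prediction results.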
| 665 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/prediction_service_batch_predict/component.yaml | name: Automl prediction service batch predict
inputs:
- name: model_path
- name: gcs_input_uris
type: JsonArray
optional: true
- name: gcs_output_uri_prefix
type: String
optional: true
- name: bq_input_uri
type: String
optional: true
- name: bq_output_uri
type: String
optional: true
- name: params
optional: true
- name: retry
optional: true
- name: timeout
optional: true
- name: metadata
type: JsonObject
optional: true
outputs:
- name: gcs_output_directory
type: String
- name: bigquery_output_dataset
type: String
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/prediction_service_batch_predict/component.yaml'
implementation:
container:
image: python:3.7
command:
- python3
- -u
- -c
- |
from typing import NamedTuple
def automl_prediction_service_batch_predict(
model_path,
gcs_input_uris: str = None,
gcs_output_uri_prefix: str = None,
bq_input_uri: str = None,
bq_output_uri: str = None,
params=None,
retry=None, #google.api_core.gapic_v1.method.DEFAULT,
timeout=None, #google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = None,
) -> NamedTuple('Outputs', [('gcs_output_directory', str), ('bigquery_output_dataset', str)]):
import sys
import subprocess
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0', '--quiet', '--no-warn-script-location'], env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
input_config = {}
if gcs_input_uris:
input_config['gcs_source'] = {'input_uris': gcs_input_uris}
if bq_input_uri:
input_config['bigquery_source'] = {'input_uri': bq_input_uri}
output_config = {}
if gcs_output_uri_prefix:
output_config['gcs_destination'] = {'output_uri_prefix': gcs_output_uri_prefix}
if bq_output_uri:
output_config['bigquery_destination'] = {'output_uri': bq_output_uri}
from google.cloud import automl
client = automl.PredictionServiceClient()
response = client.batch_predict(
model_path,
input_config,
output_config,
params,
retry,
timeout,
metadata,
)
print('Operation started:')
print(response.operation)
result = response.result()
metadata = response.metadata
print('Operation finished:')
print(metadata)
output_info = metadata.batch_predict_details.output_info
# Workaround for Argo issue - it fails when output is empty: https://github.com/argoproj/argo-workflows/pull/1277/files#r326028422
return (output_info.gcs_output_directory or '-', output_info.bigquery_output_dataset or '-')
import json
import argparse
_missing_arg = object()
_parser = argparse.ArgumentParser(prog='Automl prediction service batch predict', description='')
_parser.add_argument("--model-path", dest="model_path", type=str, required=True, default=_missing_arg)
_parser.add_argument("--gcs-input-uris", dest="gcs_input_uris", type=json.loads, required=False, default=_missing_arg)
_parser.add_argument("--gcs-output-uri-prefix", dest="gcs_output_uri_prefix", type=str, required=False, default=_missing_arg)
_parser.add_argument("--bq-input-uri", dest="bq_input_uri", type=str, required=False, default=_missing_arg)
_parser.add_argument("--bq-output-uri", dest="bq_output_uri", type=str, required=False, default=_missing_arg)
_parser.add_argument("--params", dest="params", type=str, required=False, default=_missing_arg)
_parser.add_argument("--retry", dest="retry", type=str, required=False, default=_missing_arg)
_parser.add_argument("--timeout", dest="timeout", type=str, required=False, default=_missing_arg)
_parser.add_argument("--metadata", dest="metadata", type=json.loads, required=False, default=_missing_arg)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=2)
_parsed_args = {k: v for k, v in vars(_parser.parse_args()).items() if v is not _missing_arg}
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_prediction_service_batch_predict(**_parsed_args)
if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
_outputs = [_outputs]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(str(_outputs[idx]))
args:
- --model-path
- inputValue: model_path
- if:
cond:
isPresent: gcs_input_uris
then:
- --gcs-input-uris
- inputValue: gcs_input_uris
- if:
cond:
isPresent: gcs_output_uri_prefix
then:
- --gcs-output-uri-prefix
- inputValue: gcs_output_uri_prefix
- if:
cond:
isPresent: bq_input_uri
then:
- --bq-input-uri
- inputValue: bq_input_uri
- if:
cond:
isPresent: bq_output_uri
then:
- --bq-output-uri
- inputValue: bq_output_uri
- if:
cond:
isPresent: params
then:
- --params
- inputValue: params
- if:
cond:
isPresent: retry
then:
- --retry
- inputValue: retry
- if:
cond:
isPresent: timeout
then:
- --timeout
- inputValue: timeout
- if:
cond:
isPresent: metadata
then:
- --metadata
- inputValue: metadata
- '----output-paths'
- outputPath: gcs_output_directory
- outputPath: bigquery_output_dataset
| 666 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/split_dataset_table_column_names/component.py | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
def automl_split_dataset_table_column_names(
dataset_path: str,
target_column_name: str,
table_index: int = 0,
) -> NamedTuple('Outputs', [('target_column_path', str), ('feature_column_paths', list)]):
import sys
import subprocess
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0', '--quiet', '--no-warn-script-location'], env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
from google.cloud import automl
client = automl.AutoMlClient()
list_table_specs_response = client.list_table_specs(dataset_path)
table_specs = [s for s in list_table_specs_response]
print('table_specs=')
print(table_specs)
table_spec_name = table_specs[table_index].name
list_column_specs_response = client.list_column_specs(table_spec_name)
column_specs = [s for s in list_column_specs_response]
print('column_specs=')
print(column_specs)
target_column_spec = [s for s in column_specs if s.display_name == target_column_name][0]
feature_column_specs = [s for s in column_specs if s.display_name != target_column_name]
feature_column_names = [s.name for s in feature_column_specs]
import json
return (target_column_spec.name, json.dumps(feature_column_names))
if __name__ == '__main__':
from kfp.components import create_component_from_func
automl_split_dataset_table_column_names_op = create_component_from_func(
automl_split_dataset_table_column_names,
output_component_file='component.yaml',
base_image='python:3.7',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/split_dataset_table_column_names/component.yaml",
},
)
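# --- Illustrative usage sketch (documentation only; never called) ---
# Shows how the two outputs of this component might feed a downstream AutoML
# Tables training step in a KFP v1 pipeline. The dataset path, column name and
# the downstream component are hypothetical placeholders.
def _example_split_columns_pipeline():
    from kfp import components, dsl
    split_columns_op = components.load_component_from_file('component.yaml')
    @dsl.pipeline(name='automl-split-columns-example')
    def pipeline(dataset_path: str = 'projects/my-project/locations/us-central1/datasets/TBL123'):  # hypothetical
        split_task = split_columns_op(
            dataset_path=dataset_path,
            target_column_name='label',  # hypothetical column name
        )
        # split_task.outputs['target_column_path'] and
        # split_task.outputs['feature_column_paths'] would typically be passed
        # to a model-training component (not defined here).
    return pipeline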
| 667 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/split_dataset_table_column_names/component.yaml | name: Automl split dataset table column names
inputs:
- name: dataset_path
type: String
- name: target_column_name
type: String
- name: table_index
type: Integer
default: '0'
optional: true
outputs:
- name: target_column_path
type: String
- name: feature_column_paths
type: JsonArray
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/split_dataset_table_column_names/component.yaml'
implementation:
container:
image: python:3.7
command:
- python3
- -u
- -c
- |
from typing import NamedTuple
def automl_split_dataset_table_column_names(
dataset_path: str,
target_column_name: str,
table_index: int = 0,
) -> NamedTuple('Outputs', [('target_column_path', str), ('feature_column_paths', list)]):
import sys
import subprocess
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0', '--quiet', '--no-warn-script-location'], env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
from google.cloud import automl
client = automl.AutoMlClient()
list_table_specs_response = client.list_table_specs(dataset_path)
table_specs = [s for s in list_table_specs_response]
print('table_specs=')
print(table_specs)
table_spec_name = table_specs[table_index].name
list_column_specs_response = client.list_column_specs(table_spec_name)
column_specs = [s for s in list_column_specs_response]
print('column_specs=')
print(column_specs)
target_column_spec = [s for s in column_specs if s.display_name == target_column_name][0]
feature_column_specs = [s for s in column_specs if s.display_name != target_column_name]
feature_column_names = [s.name for s in feature_column_specs]
import json
return (target_column_spec.name, json.dumps(feature_column_names))
import argparse
_missing_arg = object()
_parser = argparse.ArgumentParser(prog='Automl split dataset table column names', description='')
_parser.add_argument("--dataset-path", dest="dataset_path", type=str, required=True, default=_missing_arg)
_parser.add_argument("--target-column-name", dest="target_column_name", type=str, required=True, default=_missing_arg)
_parser.add_argument("--table-index", dest="table_index", type=int, required=False, default=_missing_arg)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=2)
_parsed_args = {k: v for k, v in vars(_parser.parse_args()).items() if v is not _missing_arg}
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_split_dataset_table_column_names(**_parsed_args)
if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
_outputs = [_outputs]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(str(_outputs[idx]))
args:
- --dataset-path
- inputValue: dataset_path
- --target-column-name
- inputValue: target_column_name
- if:
cond:
isPresent: table_index
then:
- --table-index
- inputValue: table_index
- '----output-paths'
- outputPath: target_column_path
- outputPath: feature_column_paths
| 668 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/import_data_from_gcs/component.py | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
def automl_import_data_from_gcs(
dataset_path: str,
input_uris: list,
retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
timeout=None, #=google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = None,
) -> NamedTuple('Outputs', [('dataset_path', str)]):
import sys
import subprocess
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0', '--quiet', '--no-warn-script-location'], env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
import google
from google.cloud import automl
client = automl.AutoMlClient()
input_config = {
'gcs_source': {
'input_uris': input_uris,
},
}
response = client.import_data(
dataset_path,
input_config,
retry or google.api_core.gapic_v1.method.DEFAULT,
timeout or google.api_core.gapic_v1.method.DEFAULT,
metadata,
)
result = response.result()
print(result)
metadata = response.metadata
print(metadata)
    return (dataset_path, )
if __name__ == '__main__':
from kfp.components import create_component_from_func
automl_import_data_from_gcs_op = create_component_from_func(
automl_import_data_from_gcs,
output_component_file='component.yaml',
base_image='python:3.7',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/import_data_from_gcs/component.yaml",
},
)
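# --- Illustrative usage sketch (documentation only; never called) ---
# Chains the sibling "create dataset" component with this import step in a KFP
# v1 pipeline. The relative component path, project ID and GCS URI are
# hypothetical placeholders.
def _example_create_and_import_pipeline():
    from kfp import components, dsl
    create_dataset_op = components.load_component_from_file(
        '../create_dataset_for_tables/component.yaml')  # hypothetical relative path
    import_data_op = components.load_component_from_file('component.yaml')
    @dsl.pipeline(name='automl-import-from-gcs-example')
    def pipeline():
        create_task = create_dataset_op(
            gcp_project_id='my-project',  # hypothetical
            gcp_region='us-central1',
            display_name='my_dataset',  # hypothetical
        )
        import_data_op(
            dataset_path=create_task.outputs['dataset_path'],
            input_uris=['gs://my-bucket/training_data.csv'],  # hypothetical
        )
    return pipeline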
| 669 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/import_data_from_gcs/component.yaml | name: Automl import data from gcs
inputs:
- name: dataset_path
type: String
- name: input_uris
type: JsonArray
- name: retry
optional: true
- name: timeout
optional: true
- name: metadata
type: JsonObject
optional: true
outputs:
- name: dataset_path
type: String
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/import_data_from_gcs/component.yaml'
implementation:
container:
image: python:3.7
command:
- python3
- -u
- -c
- |
from typing import NamedTuple
def automl_import_data_from_gcs(
dataset_path: str,
input_uris: list,
retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
timeout=None, #=google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = None,
) -> NamedTuple('Outputs', [('dataset_path', str)]):
import sys
import subprocess
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0', '--quiet', '--no-warn-script-location'], env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
import google
from google.cloud import automl
client = automl.AutoMlClient()
input_config = {
'gcs_source': {
'input_uris': input_uris,
},
}
response = client.import_data(
dataset_path,
input_config,
retry or google.api_core.gapic_v1.method.DEFAULT,
timeout or google.api_core.gapic_v1.method.DEFAULT,
metadata,
)
result = response.result()
print(result)
metadata = response.metadata
print(metadata)
          return (dataset_path, )
import json
import argparse
_missing_arg = object()
_parser = argparse.ArgumentParser(prog='Automl import data from gcs', description='')
_parser.add_argument("--dataset-path", dest="dataset_path", type=str, required=True, default=_missing_arg)
_parser.add_argument("--input-uris", dest="input_uris", type=json.loads, required=True, default=_missing_arg)
_parser.add_argument("--retry", dest="retry", type=str, required=False, default=_missing_arg)
_parser.add_argument("--timeout", dest="timeout", type=str, required=False, default=_missing_arg)
_parser.add_argument("--metadata", dest="metadata", type=json.loads, required=False, default=_missing_arg)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = {k: v for k, v in vars(_parser.parse_args()).items() if v is not _missing_arg}
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_import_data_from_gcs(**_parsed_args)
if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
_outputs = [_outputs]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(str(_outputs[idx]))
args:
- --dataset-path
- inputValue: dataset_path
- --input-uris
- inputValue: input_uris
- if:
cond:
isPresent: retry
then:
- --retry
- inputValue: retry
- if:
cond:
isPresent: timeout
then:
- --timeout
- inputValue: timeout
- if:
cond:
isPresent: metadata
then:
- --metadata
- inputValue: metadata
- '----output-paths'
- outputPath: dataset_path
| 670 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/create_dataset_for_tables/component.py | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
def automl_create_dataset_for_tables(
gcp_project_id: str,
gcp_region: str,
display_name: str,
description: str = None,
tables_dataset_metadata: dict = {},
retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = None,
) -> NamedTuple('Outputs', [('dataset_path', str), ('create_time', str), ('dataset_id', str), ('dataset_url', 'URI')]):
'''automl_create_dataset_for_tables creates an empty Dataset for AutoML tables
'''
import google
from google.cloud import automl
client = automl.AutoMlClient()
location_path = client.location_path(gcp_project_id, gcp_region)
dataset_dict = {
'display_name': display_name,
'description': description,
'tables_dataset_metadata': tables_dataset_metadata,
}
dataset = client.create_dataset(
location_path,
dataset_dict,
retry or google.api_core.gapic_v1.method.DEFAULT,
timeout or google.api_core.gapic_v1.method.DEFAULT,
metadata,
)
print(dataset)
dataset_id = dataset.name.rsplit('/', 1)[-1]
dataset_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id}/schemav2?project={project_id}'.format(
project_id=gcp_project_id,
region=gcp_region,
dataset_id=dataset_id,
)
return (dataset.name, str(dataset.create_time), dataset_id, dataset_url)
if __name__ == '__main__':
from kfp.components import create_component_from_func
automl_create_dataset_for_tables_op = create_component_from_func(
automl_create_dataset_for_tables,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['google-cloud-automl==0.4.0'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/create_dataset_for_tables/component.yaml",
},
)
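# --- Illustrative usage sketch (documentation only; never called) ---
# Calls the function above directly, outside of a pipeline, for local
# experimentation. Requires google-cloud-automl to be installed and Application
# Default Credentials with AutoML permissions; the project ID and display name
# are hypothetical placeholders.
def _example_direct_call():
    dataset_path, create_time, dataset_id, dataset_url = automl_create_dataset_for_tables(
        gcp_project_id='my-project',  # hypothetical
        gcp_region='us-central1',
        display_name='my_dataset',  # hypothetical
    )
    print(dataset_path, dataset_url)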
| 671 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/create_dataset_for_tables/component.yaml | name: Automl create dataset for tables
description: automl_create_dataset_for_tables creates an empty Dataset for AutoML
tables
inputs:
- {name: gcp_project_id, type: String}
- {name: gcp_region, type: String}
- {name: display_name, type: String}
- {name: description, type: String, optional: true}
- {name: tables_dataset_metadata, type: JsonObject, default: '{}', optional: true}
- {name: retry, optional: true}
- {name: timeout, type: Float, optional: true}
- {name: metadata, type: JsonObject, optional: true}
outputs:
- {name: dataset_path, type: String}
- {name: create_time, type: String}
- {name: dataset_id, type: String}
- {name: dataset_url, type: URI}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/create_dataset_for_tables/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'google-cloud-automl==0.4.0' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip
install --quiet --no-warn-script-location 'google-cloud-automl==0.4.0' --user)
&& "$0" "$@"
- python3
- -u
- -c
- |
def automl_create_dataset_for_tables(
gcp_project_id ,
gcp_region ,
display_name ,
description = None,
tables_dataset_metadata = {},
retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
timeout = None, #=google.api_core.gapic_v1.method.DEFAULT,
metadata = None,
) :
'''automl_create_dataset_for_tables creates an empty Dataset for AutoML tables
'''
import google
from google.cloud import automl
client = automl.AutoMlClient()
location_path = client.location_path(gcp_project_id, gcp_region)
dataset_dict = {
'display_name': display_name,
'description': description,
'tables_dataset_metadata': tables_dataset_metadata,
}
dataset = client.create_dataset(
location_path,
dataset_dict,
retry or google.api_core.gapic_v1.method.DEFAULT,
timeout or google.api_core.gapic_v1.method.DEFAULT,
metadata,
)
print(dataset)
dataset_id = dataset.name.rsplit('/', 1)[-1]
dataset_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id}/schemav2?project={project_id}'.format(
project_id=gcp_project_id,
region=gcp_region,
dataset_id=dataset_id,
)
return (dataset.name, str(dataset.create_time), dataset_id, dataset_url)
import json
def _serialize_str(str_value: str) -> str:
if not isinstance(str_value, str):
raise TypeError('Value "{}" has type "{}" instead of str.'.format(str(str_value), str(type(str_value))))
return str_value
import argparse
_parser = argparse.ArgumentParser(prog='Automl create dataset for tables', description='automl_create_dataset_for_tables creates an empty Dataset for AutoML tables')
_parser.add_argument("--gcp-project-id", dest="gcp_project_id", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--gcp-region", dest="gcp_region", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--display-name", dest="display_name", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--description", dest="description", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--tables-dataset-metadata", dest="tables_dataset_metadata", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--retry", dest="retry", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--timeout", dest="timeout", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--metadata", dest="metadata", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=4)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_create_dataset_for_tables(**_parsed_args)
_output_serializers = [
_serialize_str,
_serialize_str,
_serialize_str,
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --gcp-project-id
- {inputValue: gcp_project_id}
- --gcp-region
- {inputValue: gcp_region}
- --display-name
- {inputValue: display_name}
- if:
cond: {isPresent: description}
then:
- --description
- {inputValue: description}
- if:
cond: {isPresent: tables_dataset_metadata}
then:
- --tables-dataset-metadata
- {inputValue: tables_dataset_metadata}
- if:
cond: {isPresent: retry}
then:
- --retry
- {inputValue: retry}
- if:
cond: {isPresent: timeout}
then:
- --timeout
- {inputValue: timeout}
- if:
cond: {isPresent: metadata}
then:
- --metadata
- {inputValue: metadata}
- '----output-paths'
- {outputPath: dataset_path}
- {outputPath: create_time}
- {outputPath: dataset_id}
- {outputPath: dataset_url}
| 672 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/deploy_model/component.py | from typing import NamedTuple
from kfp.components import create_component_from_func
def automl_deploy_model(
model_path: str,
) -> NamedTuple('Outputs', [
('model_path', str),
]):
"""Deploys a trained model.
Args:
        model_path: The resource name of the model to deploy. Format: 'projects/<project>/locations/<location>/models/<model>'
Annotations:
author: Alexey Volkov <[email protected]>
"""
from google.cloud import automl
client = automl.AutoMlClient()
response = client.deploy_model(
name=model_path,
)
print('Operation started:')
print(response.operation)
result = response.result()
metadata = response.metadata
print('Operation finished:')
print(metadata)
return (model_path, )
if __name__ == '__main__':
automl_deploy_model_op = create_component_from_func(
automl_deploy_model,
output_component_file='component.yaml',
base_image='python:3.8',
packages_to_install=[
'google-cloud-automl==2.0.0',
],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/deploy_model/component.yaml",
},
)
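# --- Illustrative usage sketch (documentation only; never called) ---
# Deploys an already trained model from a KFP v1 pipeline. The pipeline name and
# the default model resource name are hypothetical placeholders.
def _example_deploy_pipeline():
    from kfp import components, dsl
    deploy_model_op = components.load_component_from_file('component.yaml')
    @dsl.pipeline(name='automl-deploy-model-example')
    def pipeline(model_path: str = 'projects/my-project/locations/us-central1/models/TBL123'):  # hypothetical
        deploy_model_op(model_path=model_path)
    return pipeline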
| 673 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/deploy_model/component.yaml | name: Automl deploy model
description: |-
Deploys a trained model.
Args:
      model_path: The resource name of the model to deploy. Format: 'projects/<project>/locations/<location>/models/<model>'
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: model_path, type: String}
outputs:
- {name: model_path, type: String}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/deploy_model/component.yaml'
implementation:
container:
image: python:3.8
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'google-cloud-automl==2.0.0' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip
install --quiet --no-warn-script-location 'google-cloud-automl==2.0.0' --user)
&& "$0" "$@"
- python3
- -u
- -c
- |
def automl_deploy_model(
model_path,
):
"""Deploys a trained model.
Args:
              model_path: The resource name of the model to deploy. Format: 'projects/<project>/locations/<location>/models/<model>'
Annotations:
author: Alexey Volkov <[email protected]>
"""
from google.cloud import automl
client = automl.AutoMlClient()
response = client.deploy_model(
name=model_path,
)
print('Operation started:')
print(response.operation)
result = response.result()
metadata = response.metadata
print('Operation finished:')
print(metadata)
return (model_path, )
def _serialize_str(str_value: str) -> str:
if not isinstance(str_value, str):
raise TypeError('Value "{}" has type "{}" instead of str.'.format(str(str_value), str(type(str_value))))
return str_value
import argparse
      _parser = argparse.ArgumentParser(prog='Automl deploy model', description="Deploys a trained model.\n\n    Args:\n        model_path: The resource name of the model to deploy. Format: 'projects/<project>/locations/<location>/models/<model>'\n\n    Annotations:\n        author: Alexey Volkov <[email protected]>")
_parser.add_argument("--model-path", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_deploy_model(**_parsed_args)
_output_serializers = [
_serialize_str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --model-path
- {inputValue: model_path}
- '----output-paths'
- {outputPath: model_path}
| 674 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/export_model_to_gcs/component.py | from typing import NamedTuple
from kfp.components import create_component_from_func
def automl_export_model_to_gcs(
model_path: str,
gcs_output_uri_prefix: str,
model_format: str = 'tf_saved_model',
) -> NamedTuple('Outputs', [
('model_directory', 'Uri'),
]):
"""Exports a trained model to a user specified Google Cloud Storage location.
Args:
model_path: The resource name of the model to export. Format: 'projects/<project>/locations/<location>/models/<model>'
gcs_output_uri_prefix: The Google Cloud Storage directory where the model should be written to. Must be in the same location as AutoML. Required location: us-central1.
model_format: The format in which the model must be exported. The available, and default, formats depend on the problem and model type. Possible formats: tf_saved_model, tf_js, tflite, core_ml, edgetpu_tflite. See https://cloud.google.com/automl/docs/reference/rest/v1/projects.locations.models/export?hl=en#modelexportoutputconfig
Annotations:
author: Alexey Volkov <[email protected]>
"""
from google.cloud import automl
client = automl.AutoMlClient()
response = client.export_model(
name=model_path,
output_config=automl.ModelExportOutputConfig(
model_format=model_format,
gcs_destination=automl.GcsDestination(
output_uri_prefix=gcs_output_uri_prefix,
),
),
)
print('Operation started:')
print(response.operation)
result = response.result()
metadata = response.metadata
print('Operation finished:')
print(metadata)
return (metadata.export_model_details.output_info.gcs_output_directory, )
if __name__ == '__main__':
automl_export_model_to_gcs_op = create_component_from_func(
automl_export_model_to_gcs,
output_component_file='component.yaml',
base_image='python:3.8',
packages_to_install=[
'google-cloud-automl==2.0.0',
],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/export_model_to_gcs/component.yaml",
},
)
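# --- Illustrative usage sketch (documentation only; never called) ---
# Exports a model from a KFP v1 pipeline and notes how the resulting directory
# could be consumed downstream. The model resource name and bucket are
# hypothetical placeholders.
def _example_export_model_pipeline():
    from kfp import components, dsl
    export_model_op = components.load_component_from_file('component.yaml')
    @dsl.pipeline(name='automl-export-model-example')
    def pipeline():
        export_task = export_model_op(
            model_path='projects/my-project/locations/us-central1/models/TBL123',  # hypothetical
            gcs_output_uri_prefix='gs://my-bucket/exported_models/',  # hypothetical
            model_format='tf_saved_model',
        )
        # export_task.outputs['model_directory'] points at the exported model
        # and could feed a serving or packaging step (not defined here).
    return pipeline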
| 675 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/export_model_to_gcs/component.yaml | name: Automl export model to gcs
description: |-
Exports a trained model to a user specified Google Cloud Storage location.
Args:
model_path: The resource name of the model to export. Format: 'projects/<project>/locations/<location>/models/<model>'
gcs_output_uri_prefix: The Google Cloud Storage directory where the model should be written to. Must be in the same location as AutoML. Required location: us-central1.
model_format: The format in which the model must be exported. The available, and default, formats depend on the problem and model type. Possible formats: tf_saved_model, tf_js, tflite, core_ml, edgetpu_tflite. See https://cloud.google.com/automl/docs/reference/rest/v1/projects.locations.models/export?hl=en#modelexportoutputconfig
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: model_path, type: String}
- {name: gcs_output_uri_prefix, type: String}
- {name: model_format, type: String, default: tf_saved_model, optional: true}
outputs:
- {name: model_directory, type: Uri}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/export_model_to_gcs/component.yaml'
implementation:
container:
image: python:3.8
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'google-cloud-automl==2.0.0' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip
install --quiet --no-warn-script-location 'google-cloud-automl==2.0.0' --user)
&& "$0" "$@"
- python3
- -u
- -c
- |
def automl_export_model_to_gcs(
model_path,
gcs_output_uri_prefix,
model_format = 'tf_saved_model',
):
"""Exports a trained model to a user specified Google Cloud Storage location.
Args:
model_path: The resource name of the model to export. Format: 'projects/<project>/locations/<location>/models/<model>'
gcs_output_uri_prefix: The Google Cloud Storage directory where the model should be written to. Must be in the same location as AutoML. Required location: us-central1.
model_format: The format in which the model must be exported. The available, and default, formats depend on the problem and model type. Possible formats: tf_saved_model, tf_js, tflite, core_ml, edgetpu_tflite. See https://cloud.google.com/automl/docs/reference/rest/v1/projects.locations.models/export?hl=en#modelexportoutputconfig
Annotations:
author: Alexey Volkov <[email protected]>
"""
from google.cloud import automl
client = automl.AutoMlClient()
response = client.export_model(
name=model_path,
output_config=automl.ModelExportOutputConfig(
model_format=model_format,
gcs_destination=automl.GcsDestination(
output_uri_prefix=gcs_output_uri_prefix,
),
),
)
print('Operation started:')
print(response.operation)
result = response.result()
metadata = response.metadata
print('Operation finished:')
print(metadata)
return (metadata.export_model_details.output_info.gcs_output_directory, )
import argparse
_parser = argparse.ArgumentParser(prog='Automl export model to gcs', description="Exports a trained model to a user specified Google Cloud Storage location.\n\n Args:\n model_path: The resource name of the model to export. Format: 'projects/<project>/locations/<location>/models/<model>'\n gcs_output_uri_prefix: The Google Cloud Storage directory where the model should be written to. Must be in the same location as AutoML. Required location: us-central1.\n model_format: The format in which the model must be exported. The available, and default, formats depend on the problem and model type. Possible formats: tf_saved_model, tf_js, tflite, core_ml, edgetpu_tflite. See https://cloud.google.com/automl/docs/reference/rest/v1/projects.locations.models/export?hl=en#modelexportoutputconfig\n\n Annotations:\n author: Alexey Volkov <[email protected]>")
_parser.add_argument("--model-path", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--gcs-output-uri-prefix", dest="gcs_output_uri_prefix", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model-format", dest="model_format", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_export_model_to_gcs(**_parsed_args)
_output_serializers = [
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --model-path
- {inputValue: model_path}
- --gcs-output-uri-prefix
- {inputValue: gcs_output_uri_prefix}
- if:
cond: {isPresent: model_format}
then:
- --model-format
- {inputValue: model_format}
- '----output-paths'
- {outputPath: model_directory}
| 676 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/import_data_from_bigquery/component.py | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
def automl_import_data_from_bigquery(
dataset_path,
input_uri: str,
retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
timeout=None, #=google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = None,
) -> NamedTuple('Outputs', [('dataset_path', str)]):
import sys
import subprocess
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0', '--quiet', '--no-warn-script-location'], env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
import google
from google.cloud import automl
client = automl.AutoMlClient()
input_config = {
'bigquery_source': {
'input_uri': input_uri,
},
}
response = client.import_data(
dataset_path,
input_config,
retry or google.api_core.gapic_v1.method.DEFAULT,
timeout or google.api_core.gapic_v1.method.DEFAULT,
metadata,
)
result = response.result()
print(result)
metadata = response.metadata
print(metadata)
    return (dataset_path, )
if __name__ == '__main__':
from kfp.components import create_component_from_func
automl_import_data_from_bigquery_op = create_component_from_func(
automl_import_data_from_bigquery,
output_component_file='component.yaml',
base_image='python:3.7',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/import_data_from_bigquery/component.yaml",
},
)
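# --- Illustrative usage sketch (documentation only; never called) ---
# Calls the function above directly with a BigQuery source table. The dataset
# path and BigQuery URI are hypothetical placeholders, and the call requires
# Application Default Credentials with AutoML permissions.
def _example_direct_call():
    outputs = automl_import_data_from_bigquery(
        dataset_path='projects/my-project/locations/us-central1/datasets/TBL123',  # hypothetical
        input_uri='bq://my-project.my_dataset.training_table',  # hypothetical
    )
    print(outputs)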
| 677 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/import_data_from_bigquery/component.yaml | name: Automl import data from bigquery
inputs:
- name: dataset_path
- name: input_uri
type: String
- name: retry
optional: true
- name: timeout
optional: true
- name: metadata
type: JsonObject
optional: true
outputs:
- name: dataset_path
type: String
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/import_data_from_bigquery/component.yaml'
implementation:
container:
image: python:3.7
command:
- python3
- -u
- -c
- |
from typing import NamedTuple
def automl_import_data_from_bigquery(
dataset_path,
input_uri: str,
retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
timeout=None, #=google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = None,
) -> NamedTuple('Outputs', [('dataset_path', str)]):
import sys
import subprocess
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.4.0', '--quiet', '--no-warn-script-location'], env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
import google
from google.cloud import automl
client = automl.AutoMlClient()
input_config = {
'bigquery_source': {
'input_uri': input_uri,
},
}
response = client.import_data(
dataset_path,
input_config,
retry or google.api_core.gapic_v1.method.DEFAULT,
timeout or google.api_core.gapic_v1.method.DEFAULT,
metadata,
)
result = response.result()
print(result)
metadata = response.metadata
print(metadata)
          return (dataset_path, )
import json
import argparse
_missing_arg = object()
_parser = argparse.ArgumentParser(prog='Automl import data from bigquery', description='')
_parser.add_argument("--dataset-path", dest="dataset_path", type=str, required=True, default=_missing_arg)
_parser.add_argument("--input-uri", dest="input_uri", type=str, required=True, default=_missing_arg)
_parser.add_argument("--retry", dest="retry", type=str, required=False, default=_missing_arg)
_parser.add_argument("--timeout", dest="timeout", type=str, required=False, default=_missing_arg)
_parser.add_argument("--metadata", dest="metadata", type=json.loads, required=False, default=_missing_arg)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = {k: v for k, v in vars(_parser.parse_args()).items() if v is not _missing_arg}
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_import_data_from_bigquery(**_parsed_args)
if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
_outputs = [_outputs]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(str(_outputs[idx]))
args:
- --dataset-path
- inputValue: dataset_path
- --input-uri
- inputValue: input_uri
- if:
cond:
isPresent: retry
then:
- --retry
- inputValue: retry
- if:
cond:
isPresent: timeout
then:
- --timeout
- inputValue: timeout
- if:
cond:
isPresent: metadata
then:
- --metadata
- inputValue: metadata
- '----output-paths'
- outputPath: dataset_path
| 678 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/export_data_to_gcs/component.py | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
def automl_export_data_to_gcs(
dataset_path: str,
gcs_output_uri_prefix: str = None,
#retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = {},
) -> NamedTuple('Outputs', [('gcs_output_uri_prefix', str)]):
"""Exports dataset data to GCS."""
import sys
import subprocess
subprocess.run([sys.executable, "-m", "pip", "install", "google-cloud-automl==0.4.0", "--quiet", "--no-warn-script-location"], env={"PIP_DISABLE_PIP_VERSION_CHECK": "1"}, check=True)
import google
from google.cloud import automl
client = automl.AutoMlClient()
output_config = {"gcs_destination": {"output_uri_prefix": gcs_output_uri_prefix}}
response = client.export_data(
name=dataset_path,
output_config=output_config,
#retry=retry or google.api_core.gapic_v1.method.DEFAULT
timeout=timeout or google.api_core.gapic_v1.method.DEFAULT,
metadata=metadata,
)
print('Operation started:')
print(response.operation)
result = response.result()
metadata = response.metadata
print('Operation finished:')
print(metadata)
return (gcs_output_uri_prefix, )
if __name__ == '__main__':
from kfp.components import create_component_from_func
automl_export_data_to_gcs_op = create_component_from_func(
automl_export_data_to_gcs,
        output_component_file='component.yaml',
        base_image='python:3.7',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/export_data_to_gcs/component.yaml",
},
)
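# --- Illustrative usage sketch (documentation only; never called) ---
# Exports dataset rows to GCS from a KFP v1 pipeline. The default dataset path
# and the destination bucket are hypothetical placeholders.
def _example_export_data_pipeline():
    from kfp import components, dsl
    export_data_op = components.load_component_from_file('component.yaml')
    @dsl.pipeline(name='automl-export-data-example')
    def pipeline(dataset_path: str = 'projects/my-project/locations/us-central1/datasets/TBL123'):  # hypothetical
        export_data_op(
            dataset_path=dataset_path,
            gcs_output_uri_prefix='gs://my-bucket/exported_data/',  # hypothetical
        )
    return pipeline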
| 679 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/export_data_to_gcs/component.yaml | name: Automl export data to gcs
description: |
Exports dataset data to GCS.
inputs:
- name: dataset_path
type: String
- name: gcs_output_uri_prefix
optional: true
type: String
- name: timeout
optional: true
type: Float
- default: '{}'
name: metadata
optional: true
type: JsonObject
outputs:
- name: gcs_output_uri_prefix
type: String
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/gcp/automl/export_data_to_gcs/component.yaml'
implementation:
container:
image: python:3.7
command:
- python3
- -u
- -c
- |
from typing import NamedTuple
def automl_export_data_to_gcs(
dataset_path: str,
gcs_output_uri_prefix: str = None,
#retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,
metadata: dict = {},
) -> NamedTuple('Outputs', [('gcs_output_uri_prefix', str)]):
"""Exports dataset data to GCS."""
import sys
import subprocess
subprocess.run([sys.executable, "-m", "pip", "install", "google-cloud-automl==0.4.0", "--quiet", "--no-warn-script-location"], env={"PIP_DISABLE_PIP_VERSION_CHECK": "1"}, check=True)
import google
from google.cloud import automl
client = automl.AutoMlClient()
output_config = {"gcs_destination": {"output_uri_prefix": gcs_output_uri_prefix}}
response = client.export_data(
name=dataset_path,
output_config=output_config,
#retry=retry or google.api_core.gapic_v1.method.DEFAULT
timeout=timeout or google.api_core.gapic_v1.method.DEFAULT,
metadata=metadata,
)
print('Operation started:')
print(response.operation)
result = response.result()
metadata = response.metadata
print('Operation finished:')
print(metadata)
return (gcs_output_uri_prefix, )
import json
import argparse
_parser = argparse.ArgumentParser(prog='Automl export data to gcs', description='Exports dataset data to GCS.\n')
_parser.add_argument("--dataset-path", dest="dataset_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--gcs-output-uri-prefix", dest="gcs_output_uri_prefix", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--timeout", dest="timeout", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--metadata", dest="metadata", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_export_data_to_gcs(**_parsed_args)
if not hasattr(_outputs, '__getitem__') or isinstance(_outputs, str):
_outputs = [_outputs]
_output_serializers = [
str
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --dataset-path
- inputValue: dataset_path
- if:
cond:
isPresent: gcs_output_uri_prefix
then:
- --gcs-output-uri-prefix
- inputValue: gcs_output_uri_prefix
- if:
cond:
isPresent: timeout
then:
- --timeout
- inputValue: timeout
- if:
cond:
isPresent: metadata
then:
- --metadata
- inputValue: metadata
- '----output-paths'
- outputPath: gcs_output_uri_prefix
| 680 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/Tables/Create_dataset | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/Tables/Create_dataset/from_CSV/component.py | from typing import NamedTuple
from kfp.components import create_component_from_func, InputPath, OutputPath
def automl_create_tables_dataset_from_csv(
data_path: InputPath('CSV'),
target_column_name: str = None,
column_nullability: dict = {},
column_types: dict = {},
gcs_staging_uri: str = None, # Currently AutoML Tables only supports regional buckets in "us-central1".
gcp_project_id: str = None,
gcp_region: str = 'us-central1', # Currently "us-central1" is the only region supported by AutoML tables.
) -> NamedTuple('Outputs', [
('dataset_name', str),
('dataset_url', 'URI'),
]):
'''Creates Google Cloud AutoML Tables Dataset from CSV data.
Annotations:
author: Alexey Volkov <[email protected]>
Args:
data_path: Data in CSV format that will be imported to the dataset.
target_column_name: Name of the target column for training.
column_nullability: Maps column name to boolean specifying whether the column should be marked as nullable.
column_types: Maps column name to column type. Supported types: FLOAT64, CATEGORY, STRING.
gcs_staging_uri: URI of the data staging location in Google Cloud Storage. The bucket must have the us-central1 region. If not specified, a new staging bucket will be created.
gcp_project_id: Google Cloud project ID. If not set, the default one will be used.
gcp_region: Google Cloud region. AutoML Tables only supports us-central1.
Returns:
dataset_name: AutoML dataset name (fully-qualified)
'''
import logging
import random
import google.auth
from google.cloud import automl_v1beta1 as automl
from google.cloud import storage
logging.getLogger().setLevel(logging.INFO)
# Validating and inferring the arguments
if not gcp_project_id:
_, gcp_project_id = google.auth.default()
if not gcp_region:
gcp_region = 'us-central1'
if gcp_region != 'us-central1':
        logging.warning('AutoML only supports the us-central1 region')
dataset_display_name = 'Dataset' # Allowed characters for displayName are ASCII Latin letters A-Z and a-z, an underscore (_), and ASCII digits 0-9
column_nullability = column_nullability or {}
for name, nullability in column_nullability.items():
assert isinstance(name, str)
assert isinstance(nullability, bool)
column_types = column_types or {}
for name, data_type in column_types.items():
assert isinstance(name, str)
if not hasattr(automl.TypeCode, data_type):
supported_types = [type_name for type_name in dir(automl.TypeCode) if type_name[0] != '_']
            raise ValueError(f'Unknown column type "{data_type}". Supported types: {supported_types}')
# Generating execution ID for data staging
random_integer = random.SystemRandom().getrandbits(256)
execution_id = '{:064x}'.format(random_integer)
logging.info(f'Execution ID: {execution_id}')
logging.info('Uploading the data to storage')
# TODO: Split table into < 100MB chunks as required by AutoML Tables
storage_client = storage.Client()
if gcs_staging_uri:
if not gcs_staging_uri.startswith('gs://'):
raise ValueError(f"Invalid staging storage URI: {gcs_staging_uri}")
(bucket_name, blob_prefix) = gcs_staging_uri[5:].split('/', 1)
bucket = storage_client.get_bucket(bucket_name)
else:
bucket_name = gcp_project_id + '_staging_' + gcp_region
try:
bucket = storage_client.get_bucket(bucket_name)
except Exception as ex:
logging.info(f'Creating Storage bucket {bucket_name}')
bucket = storage_client.create_bucket(
bucket_or_name=bucket_name,
project=gcp_project_id,
location=gcp_region,
)
logging.info(f'Created Storage bucket {bucket.name}')
blob_prefix = 'google.cloud.automl_tmp'
# AutoML Tables import data requires that "the file name must have a (case-insensitive) '.CSV' file extension"
training_data_blob_name = blob_prefix.rstrip('/') + '/' + execution_id + '/' + 'training_data.csv'
training_data_blob_uri = f'gs://{bucket.name}/{training_data_blob_name}'
training_data_blob = bucket.blob(training_data_blob_name)
logging.info(f'Uploading training data to {training_data_blob_uri}')
training_data_blob.upload_from_filename(data_path)
logging.info(f'Creating AutoML Tables dataset.')
automl_client = automl.AutoMlClient()
project_location_path = f'projects/{gcp_project_id}/locations/{gcp_region}'
dataset = automl.Dataset(
display_name=dataset_display_name,
tables_dataset_metadata=automl.TablesDatasetMetadata(),
# labels={},
)
dataset = automl_client.create_dataset(
dataset=dataset,
parent=project_location_path,
)
dataset_id = dataset.name.split('/')[-1]
dataset_web_url = f'https://console.cloud.google.com/automl-tables/locations/{gcp_region}/datasets/{dataset_id}'
logging.info(f'Created dataset {dataset.name}. Link: {dataset_web_url}')
logging.info(f'Importing data to the dataset: {dataset.name}.')
import_data_input_config = automl.InputConfig(
gcs_source=automl.GcsSource(
input_uris=[training_data_blob_uri],
)
)
import_data_response = automl_client.import_data(
name=dataset.name,
input_config=import_data_input_config,
)
import_data_response.result()
dataset = automl_client.get_dataset(
name=dataset.name,
)
logging.info(f'Finished importing data.')
logging.info('Updating column specs')
target_column_spec = None
primary_table_spec_name = dataset.name + '/tableSpecs/' + dataset.tables_dataset_metadata.primary_table_spec_id
table_specs_list = list(automl_client.list_table_specs(
parent=dataset.name,
))
for table_spec in table_specs_list:
table_spec_id = table_spec.name.split('/')[-1]
column_specs_list = list(automl_client.list_column_specs(
parent=table_spec.name,
))
is_primary_table = table_spec.name == primary_table_spec_name
for column_spec in column_specs_list:
if column_spec.display_name == target_column_name and is_primary_table:
target_column_spec = column_spec
column_updated = False
if column_spec.display_name in column_nullability:
column_spec.data_type.nullable = column_nullability[column_spec.display_name]
column_updated = True
if column_spec.display_name in column_types:
new_column_type = column_types[column_spec.display_name]
column_spec.data_type.type_code = getattr(automl.TypeCode, new_column_type)
column_updated = True
if column_updated:
automl_client.update_column_spec(column_spec=column_spec)
if target_column_name:
logging.info('Setting target column')
if not target_column_spec:
raise ValueError(f'Primary table does not have column "{target_column_name}"')
target_column_spec_id = target_column_spec.name.split('/')[-1]
dataset.tables_dataset_metadata.target_column_spec_id = target_column_spec_id
dataset = automl_client.update_dataset(dataset=dataset)
return (dataset.name, dataset_web_url)
if __name__ == '__main__':
automl_create_tables_dataset_from_csv_op = create_component_from_func(
automl_create_tables_dataset_from_csv,
base_image='python:3.8',
packages_to_install=['google-cloud-automl==2.0.0', 'google-cloud-storage==1.31.2', 'google-auth==1.21.3'],
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/AutoML/Tables/Create_dataset/from_CSV/component.yaml",
},
)
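# --- Illustrative usage sketch (documentation only; never called) ---
# Calls the function above directly on a local CSV file. The column names,
# column types and project ID are hypothetical placeholders; the call requires
# the pinned client libraries and Application Default Credentials.
def _example_direct_call(csv_path: str):
    dataset_name, dataset_url = automl_create_tables_dataset_from_csv(
        data_path=csv_path,
        target_column_name='tips',  # hypothetical column name
        column_nullability={'fare': False},  # hypothetical
        column_types={'trip_seconds': 'FLOAT64'},  # hypothetical
        gcp_project_id='my-project',  # hypothetical
    )
    print(dataset_name, dataset_url)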
| 681 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/Tables/Create_dataset | kubeflow_public_repos/pipelines/components/contrib/google-cloud/automl/Tables/Create_dataset/from_CSV/component.yaml | name: Automl create tables dataset from csv
description: Creates Google Cloud AutoML Tables Dataset from CSV data.
inputs:
- {name: data, type: CSV, description: Data in CSV format that will be imported to
the dataset.}
- {name: target_column_name, type: String, description: Name of the target column
for training., optional: true}
- {name: column_nullability, type: JsonObject, description: Maps column name to boolean
specifying whether the column should be marked as nullable., default: '{}', optional: true}
- {name: column_types, type: JsonObject, description: 'Maps column name to column
type. Supported types: FLOAT64, CATEGORY, STRING.', default: '{}', optional: true}
- {name: gcs_staging_uri, type: String, description: 'URI of the data staging location
in Google Cloud Storage. The bucket must have the us-central1 region. If not specified,
a new staging bucket will be created.', optional: true}
- {name: gcp_project_id, type: String, description: 'Google Cloud project ID. If not
set, the default one will be used.', optional: true}
- {name: gcp_region, type: String, description: Google Cloud region. AutoML Tables
only supports us-central1., default: us-central1, optional: true}
outputs:
- {name: dataset_name, type: String}
- {name: dataset_url, type: URI}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/AutoML/Tables/Create_dataset/from_CSV/component.yaml'
implementation:
container:
image: python:3.8
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'google-cloud-automl==2.0.0' 'google-cloud-storage==1.31.2' 'google-auth==1.21.3'
|| PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'google-cloud-automl==2.0.0' 'google-cloud-storage==1.31.2' 'google-auth==1.21.3'
--user) && "$0" "$@"
- python3
- -u
- -c
- |
def automl_create_tables_dataset_from_csv(
data_path,
target_column_name = None,
column_nullability = {},
column_types = {},
gcs_staging_uri = None, # Currently AutoML Tables only supports regional buckets in "us-central1".
gcp_project_id = None,
gcp_region = 'us-central1', # Currently "us-central1" is the only region supported by AutoML tables.
):
'''Creates Google Cloud AutoML Tables Dataset from CSV data.
Annotations:
author: Alexey Volkov <[email protected]>
Args:
data_path: Data in CSV format that will be imported to the dataset.
target_column_name: Name of the target column for training.
column_nullability: Maps column name to boolean specifying whether the column should be marked as nullable.
column_types: Maps column name to column type. Supported types: FLOAT64, CATEGORY, STRING.
gcs_staging_uri: URI of the data staging location in Google Cloud Storage. The bucket must have the us-central1 region. If not specified, a new staging bucket will be created.
gcp_project_id: Google Cloud project ID. If not set, the default one will be used.
gcp_region: Google Cloud region. AutoML Tables only supports us-central1.
Returns:
dataset_name: AutoML dataset name (fully-qualified)
'''
import logging
import random
import google.auth
from google.cloud import automl_v1beta1 as automl
from google.cloud import storage
logging.getLogger().setLevel(logging.INFO)
# Validating and inferring the arguments
if not gcp_project_id:
_, gcp_project_id = google.auth.default()
if not gcp_region:
gcp_region = 'us-central1'
if gcp_region != 'us-central1':
              logging.warning('AutoML only supports the us-central1 region')
dataset_display_name = 'Dataset' # Allowed characters for displayName are ASCII Latin letters A-Z and a-z, an underscore (_), and ASCII digits 0-9
column_nullability = column_nullability or {}
for name, nullability in column_nullability.items():
assert isinstance(name, str)
assert isinstance(nullability, bool)
column_types = column_types or {}
for name, data_type in column_types.items():
assert isinstance(name, str)
if not hasattr(automl.TypeCode, data_type):
supported_types = [type_name for type_name in dir(automl.TypeCode) if type_name[0] != '_']
                  raise ValueError(f'Unknown column type "{data_type}". Supported types: {supported_types}')
# Generating execution ID for data staging
random_integer = random.SystemRandom().getrandbits(256)
execution_id = '{:064x}'.format(random_integer)
logging.info(f'Execution ID: {execution_id}')
logging.info('Uploading the data to storage')
# TODO: Split table into < 100MB chunks as required by AutoML Tables
storage_client = storage.Client()
if gcs_staging_uri:
if not gcs_staging_uri.startswith('gs://'):
raise ValueError(f"Invalid staging storage URI: {gcs_staging_uri}")
(bucket_name, blob_prefix) = gcs_staging_uri[5:].split('/', 1)
bucket = storage_client.get_bucket(bucket_name)
else:
bucket_name = gcp_project_id + '_staging_' + gcp_region
try:
bucket = storage_client.get_bucket(bucket_name)
except Exception as ex:
logging.info(f'Creating Storage bucket {bucket_name}')
bucket = storage_client.create_bucket(
bucket_or_name=bucket_name,
project=gcp_project_id,
location=gcp_region,
)
logging.info(f'Created Storage bucket {bucket.name}')
blob_prefix = 'google.cloud.automl_tmp'
# AutoML Tables import data requires that "the file name must have a (case-insensitive) '.CSV' file extension"
training_data_blob_name = blob_prefix.rstrip('/') + '/' + execution_id + '/' + 'training_data.csv'
training_data_blob_uri = f'gs://{bucket.name}/{training_data_blob_name}'
training_data_blob = bucket.blob(training_data_blob_name)
logging.info(f'Uploading training data to {training_data_blob_uri}')
training_data_blob.upload_from_filename(data_path)
logging.info(f'Creating AutoML Tables dataset.')
automl_client = automl.AutoMlClient()
project_location_path = f'projects/{gcp_project_id}/locations/{gcp_region}'
dataset = automl.Dataset(
display_name=dataset_display_name,
tables_dataset_metadata=automl.TablesDatasetMetadata(),
# labels={},
)
dataset = automl_client.create_dataset(
dataset=dataset,
parent=project_location_path,
)
dataset_id = dataset.name.split('/')[-1]
dataset_web_url = f'https://console.cloud.google.com/automl-tables/locations/{gcp_region}/datasets/{dataset_id}'
logging.info(f'Created dataset {dataset.name}. Link: {dataset_web_url}')
logging.info(f'Importing data to the dataset: {dataset.name}.')
import_data_input_config = automl.InputConfig(
gcs_source=automl.GcsSource(
input_uris=[training_data_blob_uri],
)
)
import_data_response = automl_client.import_data(
name=dataset.name,
input_config=import_data_input_config,
)
import_data_response.result()
dataset = automl_client.get_dataset(
name=dataset.name,
)
logging.info(f'Finished importing data.')
logging.info('Updating column specs')
target_column_spec = None
primary_table_spec_name = dataset.name + '/tableSpecs/' + dataset.tables_dataset_metadata.primary_table_spec_id
table_specs_list = list(automl_client.list_table_specs(
parent=dataset.name,
))
for table_spec in table_specs_list:
table_spec_id = table_spec.name.split('/')[-1]
column_specs_list = list(automl_client.list_column_specs(
parent=table_spec.name,
))
is_primary_table = table_spec.name == primary_table_spec_name
for column_spec in column_specs_list:
if column_spec.display_name == target_column_name and is_primary_table:
target_column_spec = column_spec
column_updated = False
if column_spec.display_name in column_nullability:
column_spec.data_type.nullable = column_nullability[column_spec.display_name]
column_updated = True
if column_spec.display_name in column_types:
new_column_type = column_types[column_spec.display_name]
column_spec.data_type.type_code = getattr(automl.TypeCode, new_column_type)
column_updated = True
if column_updated:
automl_client.update_column_spec(column_spec=column_spec)
if target_column_name:
logging.info('Setting target column')
if not target_column_spec:
raise ValueError(f'Primary table does not have column "{target_column_name}"')
target_column_spec_id = target_column_spec.name.split('/')[-1]
dataset.tables_dataset_metadata.target_column_spec_id = target_column_spec_id
dataset = automl_client.update_dataset(dataset=dataset)
return (dataset.name, dataset_web_url)
def _serialize_str(str_value: str) -> str:
if not isinstance(str_value, str):
raise TypeError('Value "{}" has type "{}" instead of str.'.format(str(str_value), str(type(str_value))))
return str_value
import json
import argparse
_parser = argparse.ArgumentParser(prog='Automl create tables dataset from csv', description='Creates Google Cloud AutoML Tables Dataset from CSV data.')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--target-column-name", dest="target_column_name", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--column-nullability", dest="column_nullability", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--column-types", dest="column_types", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--gcs-staging-uri", dest="gcs_staging_uri", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--gcp-project-id", dest="gcp_project_id", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--gcp-region", dest="gcp_region", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=2)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = automl_create_tables_dataset_from_csv(**_parsed_args)
_output_serializers = [
_serialize_str,
str,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --data
- {inputPath: data}
- if:
cond: {isPresent: target_column_name}
then:
- --target-column-name
- {inputValue: target_column_name}
- if:
cond: {isPresent: column_nullability}
then:
- --column-nullability
- {inputValue: column_nullability}
- if:
cond: {isPresent: column_types}
then:
- --column-types
- {inputValue: column_types}
- if:
cond: {isPresent: gcs_staging_uri}
then:
- --gcs-staging-uri
- {inputValue: gcs_staging_uri}
- if:
cond: {isPresent: gcp_project_id}
then:
- --gcp-project-id
- {inputValue: gcp_project_id}
- if:
cond: {isPresent: gcp_region}
then:
- --gcp-region
- {inputValue: gcp_region}
- '----output-paths'
- {outputPath: dataset_name}
- {outputPath: dataset_url}
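# Usage sketch (illustrative, not generated from the component code): when this component
# is loaded with the KFP SDK, the dict-typed inputs are passed as JSON-serializable values.
# The artifact and column names below are hypothetical.
#
#   import kfp
#   create_dataset_op = kfp.components.load_component_from_file('component.yaml')
#   create_dataset_task = create_dataset_op(
#       data=training_csv,                        # CSV artifact produced by an upstream task
#       target_column_name='tips',                # hypothetical target column
#       column_nullability={'company': True},     # mark "company" as nullable
#       column_types={'trip_seconds': 'FLOAT64'}, # force a numeric type
#   )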
| 682 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage/upload_to_explicit_uri/component.yaml | name: Upload to GCS
inputs:
- {name: Data}
- {name: GCS path, type: String}
outputs:
- {name: GCS path, type: String}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/storage/upload_to_explicit_uri/component.yaml'
implementation:
container:
image: google/cloud-sdk
command:
- sh
- -ex
- -c
- |
if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi
gsutil cp -r "$0" "$1"
mkdir -p "$(dirname "$2")"
echo "$1" > "$2"
- inputPath: Data
- inputValue: GCS path
- outputPath: GCS path
| 683 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage/download_dir/component.yaml | name: Download from GCS
inputs:
- {name: GCS path, type: String}
outputs:
- {name: Data}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/storage/download_dir/component.yaml'
implementation:
container:
image: google/cloud-sdk
command:
- sh
- -ex
- -c
- |
if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi
mkdir -p "$1"
gsutil -m cp -r "$0" "$1"
- inputValue: GCS path
- outputPath: Data
| 684 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage/download_blob/component.yaml | name: Download from GCS
inputs:
- {name: GCS path, type: String}
outputs:
- {name: Data}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/storage/download_blob/component.yaml'
implementation:
container:
image: google/cloud-sdk
command:
- sh
- -ex
- -c
- |
if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi
mkdir -p "$(dirname "$1")"
gsutil -m cp -r "$0" "$1"
- inputValue: GCS path
- outputPath: Data
| 685 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage/upload_to_unique_uri/component.yaml | name: Upload to GCS
description: Upload to GCS with unique URI suffix
inputs:
- {name: Data}
- {name: GCS path prefix, type: String}
outputs:
- {name: GCS path, type: String}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/storage/upload_to_unique_uri/component.yaml'
implementation:
container:
image: google/cloud-sdk
command:
- sh
- -ex
- -c
- |
if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi
gsutil cp -r "$0" "$1"
mkdir -p "$(dirname "$2")"
echo "$1" > "$2"
- inputPath: Data
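      # The '{{workflow.uid}}_{{pod.name}}' placeholders below are resolved by Argo at runtime, making the destination URI unique per pipeline run and pod.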
- concat: [{inputValue: GCS path prefix}, '{{workflow.uid}}_{{pod.name}}']
- outputPath: GCS path
| 686 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage/download/component.yaml | name: Download from GCS
inputs:
- {name: GCS path, type: String}
outputs:
- {name: Data}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/storage/download/component.yaml'
implementation:
container:
image: google/cloud-sdk
command:
- bash # Pattern comparison only works in Bash
- -ex
- -c
- |
if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi
uri="$0"
output_path="$1"
# Checking whether the URI points to a single blob, a directory or a URI pattern
# URI points to a blob when that URI does not end with slash and listing that URI only yields the same URI
if [[ "$uri" != */ ]] && (gsutil ls "$uri" | grep --fixed-strings --line-regexp "$uri"); then
mkdir -p "$(dirname "$output_path")"
gsutil -m cp -r "$uri" "$output_path"
else
mkdir -p "$output_path" # When source path is a directory, gsutil requires the destination to also be a directory
gsutil -m rsync -r "$uri" "$output_path" # gsutil cp has different path handling than Linux cp. It always puts the source directory (name) inside the destination directory. gsutil rsync does not have that problem.
fi
- inputValue: GCS path
- outputPath: Data
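# Usage sketch (illustrative, not part of the component definition): the component can be
# loaded with the KFP SDK and its "Data" output consumed by a downstream step; the bucket
# path below is hypothetical.
#
#   import kfp
#   download_op = kfp.components.load_component_from_file('component.yaml')
#   download_task = download_op(gcs_path='gs://your-bucket/path/to/data')
#   # download_task.outputs['Data'] can be passed to any task input that takes a path.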
| 687 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage | kubeflow_public_repos/pipelines/components/contrib/google-cloud/storage/list/component.yaml | name: List blobs
inputs:
- {name: GCS path, type: String, description: 'GCS path for listing. For recursive listing use the "gs://bucket/path/**" syntax.'}
outputs:
- {name: Paths}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/storage/list/component.yaml'
volatile_component: 'true'
implementation:
container:
image: google/cloud-sdk
command:
- sh
- -ex
- -c
- |
if [ -n "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi
mkdir -p "$(dirname "$1")"
gsutil ls "$0" > "$1"
- inputValue: GCS path
- outputPath: Paths
| 688 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/train/README.md |
# Name
Component: Submitting an AI Platform training job as a pipeline step
# Label
AI Platform, Kubeflow
# Summary
A Kubeflow pipeline component to submit an AI Platform training job as a step in a pipeline.
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Other
Technique:
Other
Input data type:
Tabular
ML workflow:
Training
# Details
## Intended use
Use this component to submit a training job to AI Platform from a Kubeflow pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|:------------------|:------------------|:----------|:--------------|:-----------------|:-------------|
| project_id | The Google Cloud Platform (GCP) project ID of the job. | No | GCPProjectID | - | - |
| python_module | The name of the Python module to run after installing the training program. | Yes | String | - | None |
| package_uris | The Cloud Storage location of the packages that contain the training program and any additional dependencies. The maximum number of package URIs is 100. | Yes | List | -| None |
| region | The Compute Engine region in which the training job is run. | Yes | GCPRegion | -| us-central1 |
| args | The command line arguments to pass to the training program. | Yes | List | - | None |
| job_dir | A Cloud Storage path in which to store the training outputs and other data needed for training. This path is passed to your TensorFlow program as the command-line argument, `job-dir`. The benefit of specifying this field is that Cloud ML validates the path for use in training. | Yes | GCSPath | - | None |
| python_version | The version of Python used in training. If it is not set, the default version is 2.7. Python 3.5 is available when the runtime version is set to 1.4 and above. | Yes | String | - | None |
| runtime_version | The runtime version of AI Platform to use for training. If it is not set, AI Platform uses the default. | Yes | String | - | 1 |
| master_image_uri | The Docker image to run on the master replica. This image must be in Container Registry. | Yes | GCRPath | - | None |
| worker_image_uri | The Docker image to run on the worker replica. This image must be in Container Registry. | Yes | GCRPath |- | None |
| training_input | The input parameters to create a training job. | Yes | Dict | [TrainingInput](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput) | None |
| job_id_prefix | The prefix of the job ID that is generated. | Yes | String | - | None |
| job_id | The ID of the job to create, takes precedence over generated job id if set. | Yes | String | - | None |
| wait_interval | The number of seconds to wait between API calls to get the status of the job. | Yes | Integer | - | 30 |
## Input data schema
The component accepts two types of inputs:
* A list of Python packages from Cloud Storage.
* You can manually build a Python package and upload it to Cloud Storage by following this [guide](https://cloud.google.com/ml-engine/docs/tensorflow/packaging-trainer#manual-build).
* A Docker container from Container Registry.
* Follow this [guide](https://cloud.google.com/ml-engine/docs/using-containers) to publish and use a Docker container with this component.
## Output
| Name | Description | Type |
|:------- |:---- | :--- |
| job_id | The ID of the created job. | String |
| job_dir | The Cloud Storage path that contains the output files with the trained model. | GCSPath |
## Cautions & requirements
To use the component, you must:
* Set up a cloud environment by following this [guide](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).
* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the following access to the Kubeflow user service account:
* Read access to the Cloud Storage buckets which contain the input data, packages, or Docker images.
* Write access to the Cloud Storage bucket of the output directory.
## Detailed description
The component builds the [TrainingInput](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput) payload and submits a job via the [AI Platform REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs).
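For orientation, the sketch below (values are illustrative, not defaults) shows how additional `TrainingInput` fields can be supplied through the `training_input` argument as a JSON-serialized dictionary; field names follow the `TrainingInput` reference linked above.
```python
import json

# Illustrative only: request a custom cluster for the training job.
training_input = json.dumps({
    'scaleTier': 'CUSTOM',
    'masterType': 'n1-standard-8',
    'workerType': 'n1-standard-4',
    'workerCount': '2',
})
```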
The steps to use the component in a pipeline are:
1. Install the Kubeflow Pipelines SDK:
```python
%%capture --no-stderr
!pip3 install kfp --upgrade
```
2. Load the component using the Kubeflow Pipelines SDK:
```python
import kfp.components as comp
mlengine_train_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/ml_engine/train/component.yaml')
help(mlengine_train_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code.
In this sample, you use the code from the [census estimator sample](https://github.com/GoogleCloudPlatform/cloudml-samples/tree/master/census/estimator) to train a model on AI Platform. To upload the code to AI Platform, package the Python code and upload it to a Cloud Storage bucket.
Note: You must have read and write permissions on the bucket that you use as the working directory.
#### Set sample parameters
```python
# Required parameters
PROJECT_ID = '<Put your project ID here>'
GCS_WORKING_DIR = 'gs://<Put your GCS path here>' # No ending slash
```
```python
# Optional parameters
EXPERIMENT_NAME = 'CLOUDML - Train'
TRAINER_GCS_PATH = GCS_WORKING_DIR + '/train/trainer.tar.gz'
OUTPUT_GCS_PATH = GCS_WORKING_DIR + '/train/output/'
```
#### Clean up the working directory
```python
%%capture --no-stderr
!gsutil rm -r $GCS_WORKING_DIR
```
#### Download the sample trainer code to a local directory
```python
%%capture --no-stderr
!wget https://github.com/GoogleCloudPlatform/cloudml-samples/archive/master.zip
!unzip master.zip
```
#### Package code and upload the package to Cloud Storage
```python
%%capture --no-stderr
%%bash -s "$TRAINER_GCS_PATH"
pushd ./cloudml-samples-master/census/estimator/
python setup.py sdist
gsutil cp dist/preprocessing-1.0.tar.gz $1
popd
rm -fr ./cloudml-samples-master/ ./master.zip ./dist
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='CloudML training pipeline',
description='CloudML training pipeline'
)
def pipeline(
project_id = PROJECT_ID,
python_module = 'trainer.task',
package_uris = json.dumps([TRAINER_GCS_PATH]),
region = 'us-central1',
args = json.dumps([
'--train-files', 'gs://cloud-samples-data/ml-engine/census/data/adult.data.csv',
'--eval-files', 'gs://cloud-samples-data/ml-engine/census/data/adult.test.csv',
'--train-steps', '1000',
'--eval-steps', '100',
'--verbosity', 'DEBUG'
]),
job_dir = OUTPUT_GCS_PATH,
python_version = '',
runtime_version = '1.10',
master_image_uri = '',
worker_image_uri = '',
training_input = '',
job_id_prefix = '',
job_id = '',
wait_interval = '30'):
task = mlengine_train_op(
project_id=project_id,
python_module=python_module,
package_uris=package_uris,
region=region,
args=args,
job_dir=job_dir,
python_version=python_version,
runtime_version=runtime_version,
master_image_uri=master_image_uri,
worker_image_uri=worker_image_uri,
training_input=training_input,
job_id_prefix=job_id_prefix,
job_id=job_id,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
#### Inspect the results
Use the following command to inspect the contents in the output directory:
```python
!gsutil ls $OUTPUT_GCS_PATH
```
## References
* [Component Python code](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/ml_engine/train/sample.ipynb)
* [AI Platform REST API - Resource: Job](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 689 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/train/sample.ipynb | import kfp.components as comp
mlengine_train_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/ml_engine/train/component.yaml')
help(mlengine_train_op)

# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash

# Optional Parameters
EXPERIMENT_NAME = 'CLOUDML - Train'
TRAINER_GCS_PATH = GCS_WORKING_DIR + '/train/trainer.tar.gz'
OUTPUT_GCS_PATH = GCS_WORKING_DIR + '/train/output/'

import kfp.dsl as dsl
import json
@dsl.pipeline(
name='CloudML training pipeline',
description='CloudML training pipeline'
)
def pipeline(
project_id = PROJECT_ID,
python_module = 'trainer.task',
package_uris = json.dumps([TRAINER_GCS_PATH]),
region = 'us-central1',
args = json.dumps([
'--train-files', 'gs://cloud-samples-data/ml-engine/census/data/adult.data.csv',
'--eval-files', 'gs://cloud-samples-data/ml-engine/census/data/adult.test.csv',
'--train-steps', '1000',
'--eval-steps', '100',
'--verbosity', 'DEBUG'
]),
job_dir = OUTPUT_GCS_PATH,
python_version = '',
runtime_version = '1.10',
master_image_uri = '',
worker_image_uri = '',
training_input = '',
job_id_prefix = '',
job_id = '',
wait_interval = '30'):
task = mlengine_train_op(
project_id=project_id,
python_module=python_module,
package_uris=package_uris,
region=region,
args=args,
job_dir=job_dir,
python_version=python_version,
runtime_version=runtime_version,
master_image_uri=master_image_uri,
worker_image_uri=worker_image_uri,
training_input=training_input,
job_id_prefix=job_id_prefix,
job_id=job_id,
wait_interval=wait_interval)

pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)

#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 690 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/train/component.yaml | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Submitting a Cloud ML training job as a pipeline step
description: |
A Kubeflow Pipeline component to submit a Cloud Machine Learning (Cloud ML)
Engine training job as a step in a pipeline.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: 'Required. The ID of the parent project of the job.'
type: GCPProjectID
- name: python_module
description: 'The Python module name to run after installing the packages.'
default: ''
type: String
- name: package_uris
description: >-
The Cloud Storage location of the packages (that contain the training program
and any additional dependencies). The maximum number of package URIs is 100.
default: ''
type: List
- name: region
description: 'The Compute Engine region in which the training job is run.'
default: ''
type: GCPRegion
- name: args
description: 'The command line arguments to pass to the program.'
default: ''
type: List
- name: job_dir
description: >-
A Cloud Storage path in which to store the training outputs and other data
needed for training. This path is passed to your TensorFlow program as the
`job-dir` command-line argument. The benefit of specifying this field is
that Cloud ML validates the path for use in training.
default: ''
type: GCSPath
- name: python_version
description: >-
The version of Python used in training. If not set, the default
version is `2.7`. Python `3.5` is available when runtimeVersion is set to `1.4`
and above.
default: ''
type: String
- name: runtime_version
description: >-
The Cloud ML Engine runtime version to use for training. If not set,
Cloud ML Engine uses the default stable version, 1.0.
default: ''
type: String
- name: master_image_uri
description: >-
The Docker image to run on the master replica. This image must be in
Container Registry.
default: ''
type: GCRPath
- name: worker_image_uri
description: >-
The Docker image to run on the worker replica. This image must be in
Container Registry.
default: ''
type: GCRPath
- name: training_input
description: >-
The input parameters to create a training job. It is the JSON payload
of a [TrainingInput](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput)
default: ''
type: Dict
- name: job_id_prefix
description: 'The prefix of the generated job id.'
default: ''
type: String
- name: job_id
description: >-
The ID of the job to create, takes precedence over generated
job id if set.
default: ''
type: String
- name: wait_interval
description: >-
Optional. A time interval to wait between calls to get the job status.
Defaults to 30.
default: '30'
type: Integer
outputs:
- name: job_id
description: 'The ID of the created job.'
type: String
- name: job_dir
description: >-
The output path in Cloud Storage of the training job, which contains
the trained model files.
type: GCSPath
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1.7.0-rc.3
command: ["python", -u, -m, "kfp_component.launcher"]
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.ml_engine, train,
--project_id, {inputValue: project_id},
--python_module, {inputValue: python_module},
--package_uris, {inputValue: package_uris},
--region, {inputValue: region},
--args, {inputValue: args},
--job_dir, {inputValue: job_dir},
--python_version, {inputValue: python_version},
--runtime_version, {inputValue: runtime_version},
--master_image_uri, {inputValue: master_image_uri},
--worker_image_uri, {inputValue: worker_image_uri},
--training_input, {inputValue: training_input},
--job_id_prefix, {inputValue: job_id_prefix},
--job_id, {inputValue: job_id},
--wait_interval, {inputValue: wait_interval},
--job_id_output_path, {outputPath: job_id},
--job_dir_output_path, {outputPath: job_dir},
]
env:
KFP_POD_NAME: "{{pod.name}}"
| 691 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/batch_predict/README.md |
# Name
Batch prediction using Cloud Machine Learning Engine
# Label
Cloud Storage, Cloud ML Engine, Kubeflow, Pipeline, Component
# Summary
A Kubeflow Pipeline component to submit a batch prediction job against a deployed model on Cloud ML Engine.
# Details
## Intended use
Use the component to run a batch prediction job against a deployed model on Cloud ML Engine. The prediction output is stored in a Cloud Storage bucket.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------|-----------------|---------|
| project_id | The ID of the Google Cloud Platform (GCP) project of the job. | No | GCPProjectID | | |
| model_path | The path to the model. It can be one of the following:<br/> <ul> <li>projects/[PROJECT_ID]/models/[MODEL_ID]</li> <li>projects/[PROJECT_ID]/models/[MODEL_ID]/versions/[VERSION_ID]</li> <li>The path to a Cloud Storage location containing a model file.</li> </ul> | No | GCSPath | | |
| input_paths | The path to the Cloud Storage location containing the input data files. It can contain wildcards, for example, `gs://foo/*.csv` | No | List | GCSPath | |
| input_data_format | The format of the input data files. See [REST Resource: projects.jobs](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat) for more details. | No | String | DataFormat | |
| output_path | The path to the Cloud Storage location for the output data. | No | GCSPath | | |
| region | The Compute Engine region where the prediction job is run. | No | GCPRegion | | |
| output_data_format | The format of the output data files. See [REST Resource: projects.jobs](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat) for more details. | Yes | String | DataFormat | JSON |
| prediction_input | The JSON input parameters to create a prediction job. See [PredictionInput](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#PredictionInput) for more information. | Yes | Dict | | None |
| job_id_prefix | The prefix of the generated job id. | Yes | String | | None |
| wait_interval | The number of seconds to wait in case the operation has a long run time. | Yes | Integer | | 30 |
## Input data schema
The component accepts the following as input:
* A trained model: It can be a model file in Cloud Storage, a deployed model, or a version in Cloud ML Engine. Specify the path to the model in the `model_path `runtime argument.
* Input data: The data used to make predictions against the trained model. The data can be in [multiple formats](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat). The data path is specified by `input_paths` and the format is specified by `input_data_format`.
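For example, when `input_data_format` is `JSON`, each line of an input file is a single JSON object whose keys match the serving signature of the deployed model. The sketch below is illustrative; the feature names are hypothetical.
```python
import json

# Illustrative only: write two newline-delimited JSON instances to a local file,
# which would then be uploaded to the Cloud Storage location given in `input_paths`.
instances = [
    {'age': 25, 'education_num': 11, 'hours_per_week': 40},
    {'age': 38, 'education_num': 13, 'hours_per_week': 50},
]
with open('test.json', 'w') as f:
    for instance in instances:
        f.write(json.dumps(instance) + '\n')
```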
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created batch job. | String
## Cautions & requirements
To use the component, you must:
* Set up a cloud environment by following this [guide](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).
* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the following types of access to the Kubeflow user service account:
* Read access to the Cloud Storage buckets which contains the input data.
* Write access to the Cloud Storage bucket of the output directory.
## Detailed description
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipeline SDK:
```python
%%capture --no-stderr
!pip3 install kfp --upgrade
```
2. Load the component using the KFP SDK:
```python
import kfp.components as comp
mlengine_batch_predict_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/ml_engine/batch_predict/component.yaml')
help(mlengine_batch_predict_op)
```
### Sample Code
Note: The following sample code works in an IPython notebook or directly in Python code.
In this sample, you batch predict against a pre-built trained model from `gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/` and use the test data from `gs://ml-pipeline-playground/samples/ml_engine/census/test.json`.
#### Inspect the test data
```python
!gsutil cat gs://ml-pipeline-playground/samples/ml_engine/census/test.json
```
#### Set sample parameters
```python
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash
```
```python
# Optional Parameters
EXPERIMENT_NAME = 'CLOUDML - Batch Predict'
OUTPUT_GCS_PATH = GCS_WORKING_DIR + '/batch_predict/output/'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='CloudML batch predict pipeline',
description='CloudML batch predict pipeline'
)
def pipeline(
project_id = PROJECT_ID,
model_path = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/',
input_paths = '["gs://ml-pipeline-playground/samples/ml_engine/census/test.json"]',
input_data_format = 'JSON',
output_path = OUTPUT_GCS_PATH,
region = 'us-central1',
output_data_format='',
prediction_input = json.dumps({
'runtimeVersion': '1.10'
}),
job_id_prefix='',
wait_interval='30'):
mlengine_batch_predict_op(
project_id=project_id,
model_path=model_path,
input_paths=input_paths,
input_data_format=input_data_format,
output_path=output_path,
region=region,
output_data_format=output_data_format,
prediction_input=prediction_input,
job_id_prefix=job_id_prefix,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
#### Inspect prediction results
```python
OUTPUT_FILES_PATTERN = OUTPUT_GCS_PATH + '*'
!gsutil cat $OUTPUT_FILES_PATTERN
```
## References
* [Component python code](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py)
* [Component docker file](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/ml_engine/batch_predict/sample.ipynb)
* [Cloud Machine Learning Engine job REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 692 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/batch_predict/sample.ipynb | import kfp.components as comp
mlengine_batch_predict_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/ml_engine/batch_predict/component.yaml')
help(mlengine_batch_predict_op)

# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash

# Optional Parameters
EXPERIMENT_NAME = 'CLOUDML - Batch Predict'
OUTPUT_GCS_PATH = GCS_WORKING_DIR + '/batch_predict/output/'

import kfp.dsl as dsl
import json
@dsl.pipeline(
name='CloudML batch predict pipeline',
description='CloudML batch predict pipeline'
)
def pipeline(
project_id = PROJECT_ID,
model_path = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/',
input_paths = '["gs://ml-pipeline-playground/samples/ml_engine/census/test.json"]',
input_data_format = 'JSON',
output_path = OUTPUT_GCS_PATH,
region = 'us-central1',
output_data_format='',
prediction_input = json.dumps({
'runtimeVersion': '1.10'
}),
job_id_prefix='',
wait_interval='30'):
mlengine_batch_predict_op(
project_id=project_id,
model_path=model_path,
input_paths=input_paths,
input_data_format=input_data_format,
output_path=output_path,
region=region,
output_data_format=output_data_format,
prediction_input=prediction_input,
job_id_prefix=job_id_prefix,
wait_interval=wait_interval)

pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)

#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)

OUTPUT_FILES_PATTERN = OUTPUT_GCS_PATH + '*'
!gsutil cat $OUTPUT_FILES_PATTERN
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/batch_predict/component.yaml | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Batch predict against a model with Cloud ML Engine
description: |
Creates a MLEngine batch prediction job.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: 'Required. The ID of the parent project of the job.'
type: GCPProjectID
- name: model_path
description: >-
The path to the model. It can be either: `projects/[PROJECT_ID]/models/[MODEL_ID]`
or `projects/[PROJECT_ID]/models/[MODEL_ID]/versions/[VERSION_ID]` or a GCS path
of a model file.
type: String
- name: input_paths
description: >-
Required. The Google Cloud Storage location of the input data files. May contain
wildcards.
type: List
- name: input_data_format
description: >-
Required. The format of the input data files. See
https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat.
type: String
- name: output_path
description: 'Required. The output Google Cloud Storage location.'
type: GCSPath
- name: region
description: >-
Required. The Google Compute Engine region to run the prediction job in.
type: GCPRegion
- name: output_data_format
description: 'Optional. Format of the output data files, defaults to JSON.'
default: ''
type: String
- name: prediction_input
description: 'Input parameters to create a prediction job.'
default: ''
type: Dict
- name: job_id_prefix
description: 'The prefix of the generated job id.'
default: ''
type: String
- name: wait_interval
description: 'Optional wait interval between calls to get job status. Defaults to 30.'
default: '30'
type: Integer
outputs:
- name: job_id
description: 'The ID of the created job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1.7.0-rc.3
command: ['python', '-u', '-m', 'kfp_component.launcher']
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.ml_engine, batch_predict,
--project_id, {inputValue: project_id},
--model_path, {inputValue: model_path},
--input_paths, {inputValue: input_paths},
--input_data_format, {inputValue: input_data_format},
--output_path, {inputValue: output_path},
--region, {inputValue: region},
--output_data_format, {inputValue: output_data_format},
--prediction_input, {inputValue: prediction_input},
--job_id_prefix, {inputValue: job_id_prefix},
--wait_interval, {inputValue: wait_interval},
--job_id_output_path, {outputPath: job_id},
]
env:
KFP_POD_NAME: "{{pod.name}}"
| 694 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/deploy/README.md |
# Name
Deploying a trained model to Cloud Machine Learning Engine
# Label
Cloud Storage, Cloud ML Engine, Kubeflow, Pipeline
# Summary
A Kubeflow Pipeline component to deploy a trained model from a Cloud Storage location to Cloud ML Engine.
# Details
## Intended use
Use the component to deploy a trained model to Cloud ML Engine. The deployed model can serve online or batch predictions in a Kubeflow Pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------|-----------------|---------|
| model_uri | The URI of a Cloud Storage directory that contains a trained model file.<br/> Or <br/> An [Estimator export base directory](https://www.tensorflow.org/guide/saved_model#perform_the_export) that contains a list of subdirectories named by timestamp. The directory with the latest timestamp is used to load the trained model file. | No | GCSPath | | |
| project_id | The ID of the Google Cloud Platform (GCP) project of the serving model. | No | GCPProjectID | | |
| model_id | The name of the trained model. | Yes | String | | None |
| version_id | The name of the version of the model. If it is not provided, the operation uses a random name. | Yes | String | | None |
| runtime_version | The Cloud ML Engine runtime version to use for this deployment. If it is not provided, the default stable version, 1.0, is used. | Yes | String | | None |
| python_version | The version of Python used in the prediction. If it is not provided, version 2.7 is used. You can use Python 3.5 if runtime_version is set to 1.4 or above. Python 2.7 works with all supported runtime versions. | Yes | String | | 2.7 |
| model | The JSON payload of the new [model](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models). | Yes | Dict | | None |
| version | The new [version](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions) of the trained model. | Yes | Dict | | None |
| replace_existing_version | Indicates whether to replace the existing version in case of a conflict (if the same version number is found.) | Yes | Boolean | | FALSE |
| set_default | Indicates whether to set the new version as the default version in the model. | Yes | Boolean | | FALSE |
| wait_interval | The number of seconds to wait in case the operation has a long run time. | Yes | Integer | | 30 |
## Input data schema
The component looks for a trained model in the location specified by the `model_uri` runtime argument. The accepted trained models are:
* [Tensorflow SavedModel](https://cloud.google.com/ml-engine/docs/tensorflow/exporting-for-prediction)
* [Scikit-learn & XGBoost model](https://cloud.google.com/ml-engine/docs/scikit/exporting-for-prediction)
The accepted file formats are:
* *.pb
* *.pbtext
* model.bst
* model.joblib
* model.pkl
`model_uri` can also be an [Estimator export base directory](https://www.tensorflow.org/guide/saved_model#perform_the_export), which contains a list of subdirectories named by timestamp. The directory with the latest timestamp is used to load the trained model file.
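As an illustration (the paths below are hypothetical), passing the export base directory lets the component pick the newest timestamped subdirectory automatically:
```python
# Illustrative only: an Estimator export base directory with timestamped subdirectories.
# Passing the base directory as model_uri deploys the latest export.
model_uri = 'gs://your-bucket/census/export/exporter/'
# gs://your-bucket/census/export/exporter/1547767677/saved_model.pb
# gs://your-bucket/census/export/exporter/1547852769/saved_model.pb  # latest; used for deployment
```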
## Output
Name | Description | Type
:--- | :---------- | :---
| model_uri | The Cloud Storage URI of the trained model. | GCSPath |
| model_name | The name of the deployed model. | String |
| version_name | The name of the deployed version. | String |
## Cautions & requirements
To use the component, you must:
* [Set up the cloud environment](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).
* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant read access to the Cloud Storage bucket that contains the trained model to the Kubeflow user service account.
## Detailed description
Use the component to:
* Locate the trained model at the Cloud Storage location you specify.
* Create a new model if the specified model doesn't exist.
* Delete the existing model version if `replace_existing_version` is enabled.
* Create a new version of the model from the trained model.
* Set the new version as the default version of the model if `set_default` is enabled.
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipeline SDK:
```python
%%capture --no-stderr
!pip3 install kfp --upgrade
```
2. Load the component using the KFP SDK:
```python
import kfp.components as comp
mlengine_deploy_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/ml_engine/deploy/component.yaml')
help(mlengine_deploy_op)
```
### Sample
Note: The following sample code works in IPython notebook or directly in Python code.
In this sample, you deploy a pre-built trained model from `gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/` to Cloud ML Engine. The deployed model is `kfp_sample_model`. A new version is created every time the sample is run, and the latest version is set as the default version of the deployed model.
#### Set sample parameters
```python
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
# Optional Parameters
EXPERIMENT_NAME = 'CLOUDML - Deploy'
TRAINED_MODEL_PATH = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='CloudML deploy pipeline',
description='CloudML deploy pipeline'
)
def pipeline(
model_uri = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/',
project_id = PROJECT_ID,
model_id = 'kfp_sample_model',
version_id = '',
runtime_version = '1.10',
python_version = '',
version = {},
replace_existing_version = 'False',
set_default = 'True',
wait_interval = '30'):
task = mlengine_deploy_op(
model_uri=model_uri,
project_id=project_id,
model_id=model_id,
version_id=version_id,
runtime_version=runtime_version,
python_version=python_version,
version=version,
replace_existing_version=replace_existing_version,
set_default=set_default,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Component python code](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_deploy.py)
* [Component docker file](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/release-1.7/components/gcp/ml_engine/deploy/sample.ipynb)
* [Cloud Machine Learning Engine Model REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models)
* [Cloud Machine Learning Engine Version REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.versions)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 695 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/deploy/sample.ipynb | import kfp.components as comp
mlengine_deploy_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/ml_engine/deploy/component.yaml')
help(mlengine_deploy_op)

# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
# Optional Parameters
EXPERIMENT_NAME = 'CLOUDML - Deploy'
TRAINED_MODEL_PATH = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/'

import kfp.dsl as dsl
import json
@dsl.pipeline(
name='CloudML deploy pipeline',
description='CloudML deploy pipeline'
)
def pipeline(
model_uri = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/',
project_id = PROJECT_ID,
model_id = 'kfp_sample_model',
version_id = '',
runtime_version = '1.10',
python_version = '',
version = {},
replace_existing_version = 'False',
set_default = 'True',
wait_interval = '30'):
task = mlengine_deploy_op(
model_uri=model_uri,
project_id=project_id,
model_id=model_id,
version_id=version_id,
runtime_version=runtime_version,
python_version=python_version,
version=version,
replace_existing_version=replace_existing_version,
set_default=set_default,
wait_interval=wait_interval)

pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)

#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 696 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine | kubeflow_public_repos/pipelines/components/contrib/google-cloud/ml_engine/deploy/component.yaml | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Deploying a trained model to Cloud Machine Learning Engine
description: |
A Kubeflow Pipeline component to deploy a trained model from a Cloud Storage
path to a Cloud Machine Learning Engine service.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: model_uri
description: >-
Required. The Cloud Storage URI which contains a model file. Commonly
used TF model search paths (export/exporter) will be used if they exist.
type: GCSPath
- name: project_id
description: 'Required.The ID of the parent project of the serving model.'
type: GCPProjectID
- name: model_id
description: >-
Optional. The user-specified name of the model. If it is not provided,
the operation uses a random name.
default: ''
type: String
- name: version_id
description: >-
Optional. The user-specified name of the version. If it is not provided,
the operation uses a random name.
default: ''
type: String
- name: runtime_version
description: >-
Optional. The [Cloud ML Engine runtime version](https://cloud.google.com/ml-engine/docs/tensorflow/runtime-version-list) to use for
this deployment. If it is not set, the Cloud ML Engine uses the default
stable version, 1.0.
default: ''
type: String
- name: python_version
description: >-
Optional. The version of Python used in the prediction. If it is not set,
the default version is `2.7`. Python `3.5` is available when the
runtime_version is set to `1.4` and above. Python `2.7` works with all
supported runtime versions.
default: ''
type: String
- name: model
description: >-
Optional. The JSON payload of the new
[Model](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models), if it does not exist.
default: ''
type: Dict
- name: version
description: >-
Optional. The JSON payload of the new
[Version](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions).
default: ''
type: Dict
- name: replace_existing_version
description: >-
A Boolean flag that indicates whether to replace existing version in case of conflict.
default: 'False'
type: Bool
- name: set_default
description: >-
A Boolean flag that indicates whether to set the new version as default version in the model.
default: 'False'
type: Bool
- name: wait_interval
description: 'A time-interval to wait for in case the operation has a long run time.'
default: '30'
type: Integer
outputs:
- name: model_uri
description: 'The Cloud Storage URI of the trained model.'
type: GCSPath
- name: model_name
description: 'The name of the deployed model.'
type: String
- name: version_name
description: 'The name of the deployed version.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1.7.0-rc.3
command: ["python", -u, -m, "kfp_component.launcher"]
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.ml_engine, deploy,
--model_uri, {inputValue: model_uri},
--project_id, {inputValue: project_id},
--model_id, {inputValue: model_id},
--version_id, {inputValue: version_id},
--runtime_version, {inputValue: runtime_version},
--python_version, {inputValue: python_version},
--model, {inputValue: model},
--version, {inputValue: version},
--replace_existing_version, {inputValue: replace_existing_version},
--set_default, {inputValue: set_default},
--wait_interval, {inputValue: wait_interval},
--model_uri_output_path, {outputPath: model_uri},
--model_name_output_path, {outputPath: model_name},
--version_name_output_path, {outputPath: version_name},
]
env:
KFP_POD_NAME: "{{pod.name}}"
| 697 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/Optimizer | kubeflow_public_repos/pipelines/components/contrib/google-cloud/Optimizer/Suggest_trials/component.py | from typing import NamedTuple
from kfp.components import create_component_from_func
def suggest_trials_in_gcp_ai_platform_optimizer(
study_name: str,
suggestion_count: int,
gcp_project_id: str = None,
gcp_region: str = "us-central1",
) -> NamedTuple('Outputs', [
("suggested_trials", list),
]):
"""Suggests trials (parameter sets) to evaluate.
See https://cloud.google.com/ai-platform/optimizer/docs
Annotations:
author: Alexey Volkov <[email protected]>
Args:
study_name: Full resource name of the study.
suggestion_count: Number of suggestions to request.
gcp_project_id: Google Cloud project ID. If not specified, the default project of the runtime credentials is used.
gcp_region: Google Cloud region. Defaults to "us-central1".
"""
import logging
import time
import google.auth
from googleapiclient import discovery
logging.getLogger().setLevel(logging.INFO)
client_id = 'client1'
credentials, default_project_id = google.auth.default()
# Validating and inferring the arguments
if not gcp_project_id:
gcp_project_id = default_project_id
# Building the API client.
# The standard ML API discovery does not expose the Optimizer endpoints, so the client is built from the published discovery document.
def create_caip_optimizer_client(project_id):
from google.cloud import storage
_OPTIMIZER_API_DOCUMENT_BUCKET = 'caip-optimizer-public'
_OPTIMIZER_API_DOCUMENT_FILE = 'api/ml_public_google_rest_v1.json'
client = storage.Client(project_id)
bucket = client.get_bucket(_OPTIMIZER_API_DOCUMENT_BUCKET)
blob = bucket.get_blob(_OPTIMIZER_API_DOCUMENT_FILE)
discovery_document = blob.download_as_bytes()
return discovery.build_from_document(service=discovery_document)
# Workaround for the Optimizer bug: Optimizer returns resource names that use project number, but only supports resource names with project IDs when making requests
def get_project_number(project_id):
service = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)
response = service.projects().get(projectId=project_id).execute()
return response['projectNumber']
gcp_project_number = get_project_number(gcp_project_id)
def fix_resource_name(name):
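        # Illustrative example (hypothetical IDs):
        #   'projects/123456789012/locations/us-central1/studies/s1' ->
        #   'projects/my-project/locations/us-central1/studies/s1'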
return name.replace(gcp_project_number, gcp_project_id)
ml_api = create_caip_optimizer_client(gcp_project_id)
trials_api = ml_api.projects().locations().studies().trials()
operations_api = ml_api.projects().locations().operations()
suggest_trials_request = trials_api.suggest(
parent=fix_resource_name(study_name),
body=dict(
suggestionCount=suggestion_count,
clientId=client_id,
),
)
suggest_trials_response = suggest_trials_request.execute()
operation_name = suggest_trials_response['name']
while True:
get_operation_response = operations_api.get(
name=fix_resource_name(operation_name),
).execute()
# Knowledge: The "done" key is just missing until the result is available
if get_operation_response.get('done'):
break
logging.info('Not finished yet: ' + str(get_operation_response))
time.sleep(10)
operation_response = get_operation_response['response']
suggested_trials = operation_response['trials']
return (suggested_trials,)
if __name__ == '__main__':
suggest_trials_in_gcp_ai_platform_optimizer_op = create_component_from_func(
suggest_trials_in_gcp_ai_platform_optimizer,
base_image='python:3.8',
packages_to_install=['google-api-python-client==1.12.3', 'google-cloud-storage==1.31.2', 'google-auth==1.21.3'],
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/Optimizer/Suggest_trials/component.yaml",
},
)
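# Usage sketch (illustrative; not part of the original component). In a KFP v1 pipeline the
# component is typically loaded from the generated component.yaml; the study name below is a
# hypothetical placeholder.
#
#   import kfp
#   suggest_trials_op = kfp.components.load_component_from_file('component.yaml')
#
#   @kfp.dsl.pipeline(name='optimizer-suggest-demo')
#   def demo_pipeline():
#       suggest_task = suggest_trials_op(
#           study_name='projects/my-project/locations/us-central1/studies/my-study',
#           suggestion_count=3,
#       )
#       # suggest_task.outputs['suggested_trials'] holds the JSON-serialized trial list.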
| 698 |
0 | kubeflow_public_repos/pipelines/components/contrib/google-cloud/Optimizer | kubeflow_public_repos/pipelines/components/contrib/google-cloud/Optimizer/Suggest_trials/component.yaml | name: Suggest trials in gcp ai platform optimizer
description: Suggests trials (parameter sets) to evaluate.
inputs:
- {name: study_name, type: String, description: Full resource name of the study.}
- {name: suggestion_count, type: Integer, description: Number of suggestions to request.}
- {name: gcp_project_id, type: String, optional: true}
- {name: gcp_region, type: String, default: us-central1, optional: true}
outputs:
- {name: suggested_trials, type: JsonArray}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/Optimizer/Suggest_trials/component.yaml'
implementation:
container:
image: python:3.8
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'google-api-python-client==1.12.3' 'google-cloud-storage==1.31.2' 'google-auth==1.21.3'
|| PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'google-api-python-client==1.12.3' 'google-cloud-storage==1.31.2' 'google-auth==1.21.3'
--user) && "$0" "$@"
- python3
- -u
- -c
- |
def suggest_trials_in_gcp_ai_platform_optimizer(
study_name,
suggestion_count,
gcp_project_id = None,
gcp_region = "us-central1",
):
"""Suggests trials (parameter sets) to evaluate.
See https://cloud.google.com/ai-platform/optimizer/docs
Annotations:
author: Alexey Volkov <[email protected]>
Args:
study_name: Full resource name of the study.
suggestion_count: Number of suggestions to request.
"""
import logging
import time
import google.auth
from googleapiclient import discovery
logging.getLogger().setLevel(logging.INFO)
client_id = 'client1'
credentials, default_project_id = google.auth.default()
# Validating and inferring the arguments
if not gcp_project_id:
gcp_project_id = default_project_id
# Building the API client.
          # The standard discovery service does not work for the Optimizer methods, so the client
          # is built from the discovery document published to Cloud Storage.
def create_caip_optimizer_client(project_id):
from google.cloud import storage
_OPTIMIZER_API_DOCUMENT_BUCKET = 'caip-optimizer-public'
_OPTIMIZER_API_DOCUMENT_FILE = 'api/ml_public_google_rest_v1.json'
client = storage.Client(project_id)
bucket = client.get_bucket(_OPTIMIZER_API_DOCUMENT_BUCKET)
blob = bucket.get_blob(_OPTIMIZER_API_DOCUMENT_FILE)
discovery_document = blob.download_as_bytes()
return discovery.build_from_document(service=discovery_document)
          # Workaround for an Optimizer bug: the service returns resource names that use the project
          # number, but only accepts resource names that use the project ID in requests.
def get_project_number(project_id):
service = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)
response = service.projects().get(projectId=project_id).execute()
return response['projectNumber']
gcp_project_number = get_project_number(gcp_project_id)
def fix_resource_name(name):
return name.replace(gcp_project_number, gcp_project_id)
ml_api = create_caip_optimizer_client(gcp_project_id)
trials_api = ml_api.projects().locations().studies().trials()
operations_api = ml_api.projects().locations().operations()
suggest_trials_request = trials_api.suggest(
parent=fix_resource_name(study_name),
body=dict(
suggestionCount=suggestion_count,
clientId=client_id,
),
)
suggest_trials_response = suggest_trials_request.execute()
operation_name = suggest_trials_response['name']
while True:
get_operation_response = operations_api.get(
name=fix_resource_name(operation_name),
).execute()
# Knowledge: The "done" key is just missing until the result is available
if get_operation_response.get('done'):
break
logging.info('Not finished yet: ' + str(get_operation_response))
time.sleep(10)
operation_response = get_operation_response['response']
suggested_trials = operation_response['trials']
return (suggested_trials,)
def _serialize_json(obj) -> str:
if isinstance(obj, str):
return obj
import json
def default_serializer(obj):
if hasattr(obj, 'to_struct'):
return obj.to_struct()
else:
raise TypeError("Object of type '%s' is not JSON serializable and does not have .to_struct() method." % obj.__class__.__name__)
return json.dumps(obj, default=default_serializer, sort_keys=True)
import argparse
_parser = argparse.ArgumentParser(prog='Suggest trials in gcp ai platform optimizer', description='Suggests trials (parameter sets) to evaluate.')
_parser.add_argument("--study-name", dest="study_name", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--suggestion-count", dest="suggestion_count", type=int, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--gcp-project-id", dest="gcp_project_id", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--gcp-region", dest="gcp_region", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("----output-paths", dest="_output_paths", type=str, nargs=1)
_parsed_args = vars(_parser.parse_args())
_output_files = _parsed_args.pop("_output_paths", [])
_outputs = suggest_trials_in_gcp_ai_platform_optimizer(**_parsed_args)
_output_serializers = [
_serialize_json,
]
import os
for idx, output_file in enumerate(_output_files):
try:
os.makedirs(os.path.dirname(output_file))
except OSError:
pass
with open(output_file, 'w') as f:
f.write(_output_serializers[idx](_outputs[idx]))
args:
- --study-name
- {inputValue: study_name}
- --suggestion-count
- {inputValue: suggestion_count}
- if:
cond: {isPresent: gcp_project_id}
then:
- --gcp-project-id
- {inputValue: gcp_project_id}
- if:
cond: {isPresent: gcp_region}
then:
- --gcp-region
- {inputValue: gcp_region}
- '----output-paths'
- {outputPath: suggested_trials}
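# Usage note (illustrative): this spec can be loaded with
# kfp.components.load_component_from_file('component.yaml') or from the canonical_location
# URL above via kfp.components.load_component_from_url(...).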
| 699 |