index (int64) | repo_id (string, 21-232 chars) | file_path (string, 34-259 chars) | content (string, 1-14.1M chars) | __index_level_0__ (int64, 0-10k) |
---|---|---|---|---|
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/deployer/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM debian
RUN apt-get update -q && apt-get upgrade -y && \
apt-get install -y -qq --no-install-recommends \
apt-transport-https \
ca-certificates \
git \
gnupg \
lsb-release \
unzip \
wget && \
wget --no-verbose -O /opt/ks_0.13.1_linux_amd64.tar.gz \
https://github.com/ksonnet/ksonnet/releases/download/v0.13.1/ks_0.13.1_linux_amd64.tar.gz && \
tar -C /opt -xzf /opt/ks_0.13.1_linux_amd64.tar.gz && \
cp /opt/ks_0.13.1_linux_amd64/ks /bin/. && \
rm -f /opt/ks_0.13.1_linux_amd64.tar.gz && \
wget --no-verbose -O /bin/kubectl \
https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kubectl && \
chmod u+x /bin/kubectl && \
wget --no-verbose -O /opt/kubernetes_v1.11.2 \
https://github.com/kubernetes/kubernetes/archive/v1.11.2.tar.gz && \
mkdir -p /src && \
tar -C /src -xzf /opt/kubernetes_v1.11.2 && \
rm -rf /opt/kubernetes_v1.11.2 && \
wget --no-verbose -O /opt/google-apt-key.gpg \
https://packages.cloud.google.com/apt/doc/apt-key.gpg && \
apt-key add /opt/google-apt-key.gpg && \
export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \
echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" >> \
/etc/apt/sources.list.d/google-cloud-sdk.list && \
apt-get update -q && \
apt-get install -y -qq --no-install-recommends google-cloud-sdk && \
gcloud config set component_manager/disable_update_check true
ENV KUBEFLOW_VERSION v0.4.0
# Checkout the kubeflow packages at image build time so that we do not
# require calling in to the GitHub API at run time.
RUN cd /src && \
mkdir -p github.com/kubeflow && \
cd github.com/kubeflow && \
git clone https://github.com/kubeflow/kubeflow && \
cd kubeflow && \
git checkout ${KUBEFLOW_VERSION}
ADD ./src/deploy.sh /bin/.
ENTRYPOINT ["/bin/deploy.sh"]
| 500 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/deployer | kubeflow_public_repos/pipelines/components/kubeflow/deployer/src/deploy.sh | #!/bin/bash -e
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
KUBERNETES_NAMESPACE="${KUBERNETES_NAMESPACE:-kubeflow}"
SERVER_NAME="${SERVER_NAME:-model-server}"
while (($#)); do
case $1 in
"--model-export-path")
shift
MODEL_EXPORT_PATH="$1"
shift
;;
"--cluster-name")
shift
CLUSTER_NAME="$1"
shift
;;
"--namespace")
shift
KUBERNETES_NAMESPACE="$1"
shift
;;
"--server-name")
shift
SERVER_NAME="$1"
shift
;;
"--pvc-name")
shift
PVC_NAME="$1"
shift
;;
"--service-type")
shift
SERVICE_TYPE="$1"
shift
;;
*)
echo "Unknown argument: '$1'"
exit 1
;;
esac
done
if [ -z "${MODEL_EXPORT_PATH}" ]; then
echo "You must specify a path to the saved model"
exit 1
fi
echo "Deploying the model '${MODEL_EXPORT_PATH}'"
if [ -z "${CLUSTER_NAME}" ]; then
CLUSTER_NAME=$(wget -q -O- --header="Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name)
fi
# Ensure the server name is not more than 63 characters.
SERVER_NAME="${SERVER_NAME:0:63}"
# Trim any trailing hyphens from the server name.
while [[ "${SERVER_NAME:(-1)}" == "-" ]]; do SERVER_NAME="${SERVER_NAME::-1}"; done
echo "Deploying ${SERVER_NAME} to the cluster ${CLUSTER_NAME}"
# Connect kubectl to the local cluster
kubectl config set-cluster "${CLUSTER_NAME}" --server=https://kubernetes.default --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
kubectl config set-credentials pipeline --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
kubectl config set-context kubeflow --cluster "${CLUSTER_NAME}" --user pipeline
kubectl config use-context kubeflow
# Configure and deploy the TF serving app
cd /src/github.com/kubeflow/kubeflow
git checkout ${KUBEFLOW_VERSION}
cd /opt
echo "Initializing KSonnet app..."
ks init tf-serving-app
cd tf-serving-app/
if [ -n "${KUBERNETES_NAMESPACE}" ]; then
echo "Setting Kubernetes namespace: ${KUBERNETES_NAMESPACE} ..."
ks env set default --namespace "${KUBERNETES_NAMESPACE}"
fi
echo "Installing Kubeflow packages..."
ks registry add kubeflow /src/github.com/kubeflow/kubeflow/kubeflow
ks pkg install kubeflow/common@${KUBEFLOW_VERSION}
ks pkg install kubeflow/tf-serving@${KUBEFLOW_VERSION}
echo "Generating the TF Serving config..."
ks generate tf-serving server --name="${SERVER_NAME}"
ks param set server modelPath "${MODEL_EXPORT_PATH}"
# service type: ClusterIP or NodePort
if [ -n "${SERVICE_TYPE}" ];then
ks param set server serviceType "${SERVICE_TYPE}"
fi
# support local storage to deploy tf-serving.
if [ -n "${PVC_NAME}" ];then
# TODO: Remove modelStorageType setting after the hard code nfs was removed at
# https://github.com/kubeflow/kubeflow/blob/v0.4-branch/kubeflow/tf-serving/tf-serving.libsonnet#L148-L151
ks param set server modelStorageType nfs
ks param set server nfsPVC "${PVC_NAME}"
fi
echo "Deploying the TF Serving service..."
ks apply default -c server
# Wait for the deployment to have at least one available replica
echo "Waiting for the TF Serving deployment to show up..."
timeout="1000"
start_time=`date +%s`
while [[ $(kubectl get deploy --namespace "${KUBERNETES_NAMESPACE}" --selector=app="${SERVER_NAME}" 2>&1|wc -l) != "2" ]];do
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
echo "Waiting for the valid workflow json..."
start_time=`date +%s`
exit_code="1"
while [[ $exit_code != "0" ]];do
kubectl get deploy --namespace "${KUBERNETES_NAMESPACE}" --selector=app="${SERVER_NAME}" --output=jsonpath='{.items[0].status.availableReplicas}'
exit_code=$?
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
echo "Waiting for the TF Serving deployment to have at least one available replica..."
start_time=`date +%s`
while [[ $(kubectl get deploy --namespace "${KUBERNETES_NAMESPACE}" --selector=app="${SERVER_NAME}" --output=jsonpath='{.items[0].status.availableReplicas}') < "1" ]]; do
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 5
done
echo "Obtaining the pod name..."
start_time=`date +%s`
pod_name=""
while [[ $pod_name == "" ]];do
pod_name=$(kubectl get pods --namespace "${KUBERNETES_NAMESPACE}" --selector=app="${SERVER_NAME}" --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}')
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
echo "Pod name is: " $pod_name
# Wait for the pod container to start running
echo "Waiting for the TF Serving pod to start running..."
start_time=`date +%s`
exit_code="1"
while [[ $exit_code != "0" ]];do
kubectl get po ${pod_name} --namespace "${KUBERNETES_NAMESPACE}" -o jsonpath='{.status.containerStatuses[0].state.running}'
exit_code=$?
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
start_time=`date +%s`
while [ -z "$(kubectl get po ${pod_name} --namespace "${KUBERNETES_NAMESPACE}" -o jsonpath='{.status.containerStatuses[0].state.running}')" ]; do
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 5
done
# Wait a little while and then grab the logs of the running server
sleep 10
echo "Logs from the TF Serving pod:"
kubectl logs ${pod_name} --namespace "${KUBERNETES_NAMESPACE}"
| 501 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/common/launch_crd.py | # Copyright 2019 kubeflow.org.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import logging
import multiprocessing
import time
from kubernetes import client as k8s_client
from kubernetes.client import rest
logger = logging.getLogger(__name__)
class K8sCR(object):
def __init__(self, group, plural, version, client):
self.group = group
self.plural = plural
self.version = version
self.client = k8s_client.CustomObjectsApi(client)
def wait_for_condition(self,
namespace,
name,
expected_conditions=[],
timeout=datetime.timedelta(days=365),
polling_interval=datetime.timedelta(seconds=30),
status_callback=None):
"""Waits until any of the specified conditions occur.
Args:
namespace: namespace for the CR.
name: Name of the CR.
expected_conditions: A list of conditions. Function waits until any of the
supplied conditions is reached.
timeout: How long to wait for the CR.
polling_interval: How often to poll for the status of the CR.
status_callback: (Optional): Callable. If supplied this callable is
invoked after we poll the CR. Callable takes a single argument which
is the CR.
"""
end_time = datetime.datetime.now() + timeout
while True:
try:
results = self.client.get_namespaced_custom_object(
self.group, self.version, namespace, self.plural, name)
except Exception as e:
logger.error("There was a problem waiting for %s/%s %s in namespace %s; Exception: %s",
self.group, self.plural, name, namespace, e)
raise
if results:
if status_callback:
status_callback(results)
expected, condition = self.is_expected_conditions(results, expected_conditions)
if expected:
logger.info("%s/%s %s in namespace %s has reached the expected condition: %s.",
self.group, self.plural, name, namespace, condition)
return results
else:
if condition:
logger.info("Current condition of %s/%s %s in namespace %s is %s.",
self.group, self.plural, name, namespace, condition)
if datetime.datetime.now() + polling_interval > end_time:
raise Exception(
"Timeout waiting for {0}/{1} {2} in namespace {3} to enter one of the "
"conditions {4}.".format(self.group, self.plural, name, namespace, expected_conditions))
time.sleep(polling_interval.seconds)
def is_expected_conditions(self, inst, expected_conditions):
conditions = inst.get('status', {}).get("conditions")
if not conditions:
return False, ""
if conditions[-1]["type"] in expected_conditions and conditions[-1]["status"] == "True":
return True, conditions[-1]["type"]
else:
return False, conditions[-1]["type"]
def create(self, spec):
"""Create a CR.
Args:
spec: The spec for the CR.
"""
try:
# Create a Resource
namespace = spec["metadata"].get("namespace", "default")
logger.info("Creating %s/%s %s in namespace %s.",
self.group, self.plural, spec["metadata"]["name"], namespace)
api_response = self.client.create_namespaced_custom_object(
self.group, self.version, namespace, self.plural, spec)
logger.info("Created %s/%s %s in namespace %s.",
self.group, self.plural, spec["metadata"]["name"], namespace)
return api_response
except rest.ApiException as e:
self._log_and_raise_exception(e, "create")
def delete(self, name, namespace):
try:
body = {
# Set garbage collection so that CR won't be deleted until all
# owned references are deleted.
"propagationPolicy": "Foreground",
}
logger.info("Deleteing %s/%s %s in namespace %s.",
self.group, self.plural, name, namespace)
api_response = self.client.delete_namespaced_custom_object(
group=self.group,
version=self.version,
namespace=namespace,
plural=self.plural,
name=name,
body=body)
logger.info("Deleted %s/%s %s in namespace %s.",
self.group, self.plural, name, namespace)
return api_response
except rest.ApiException as e:
self._log_and_raise_exception(e, "delete")
def _log_and_raise_exception(self, ex, action):
# Pull a human-readable message out of the API error body if possible.
# Note: kubernetes.client.rest.ApiException has no .message attribute on Python 3.
message = ""
if ex.body:
try:
body = json.loads(ex.body)
message = body.get("message", "")
except ValueError:
message = ex.body
logger.error("Exception when %s %s/%s: %s", action, self.group, self.plural, message or ex.body)
raise ex
| 502 |
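The `K8sCR` helper above is the building block the TFJob and PyTorchJob launchers later in this table reuse: construct it with a CRD's group/plural/version, `create()` a spec, `wait_for_condition()` until a terminal condition appears, then optionally `delete()` the resource. Below is a minimal sketch of that flow, assuming in-cluster execution with RBAC to manage TFJobs and `launch_crd.py` importable on the `PYTHONPATH`; the job name, namespace, and the empty replica specs are placeholders for illustration only.

```python
import datetime

from kubernetes import client as k8s_client
from kubernetes import config

import launch_crd

# Assumes the pod has a service account token mounted (in-cluster config).
config.load_incluster_config()
api_client = k8s_client.ApiClient()

# Point the generic CR client at the TFJob CRD.
tfjob_cr = launch_crd.K8sCR(
    group="kubeflow.org", plural="tfjobs", version="v1", client=api_client)

spec = {
    "apiVersion": "kubeflow.org/v1",
    "kind": "TFJob",
    "metadata": {"name": "demo-tfjob", "namespace": "kubeflow"},  # placeholder names
    "spec": {"tfReplicaSpecs": {}},  # replica specs omitted for brevity
}

tfjob_cr.create(spec)
# Poll (every 30s by default) until the last status condition is Succeeded or Failed.
tfjob_cr.wait_for_condition(
    "kubeflow", "demo-tfjob", ["Succeeded", "Failed"],
    timeout=datetime.timedelta(minutes=60))
tfjob_cr.delete("demo-tfjob", "kubeflow")
```

This is the same sequence `launch_tfjob.py` and `launch_pytorchjob.py` below perform, with the spec built from CLI arguments instead of hard-coded.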
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/launcher/build_image.sh | #!/bin/bash -e
# Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
while getopts ":hp:t:i:" opt; do
case "${opt}" in
h) echo "-p: project name"
echo "-t: tag name"
echo "-i: image name. If provided, project name and tag name are not necessary"
exit
;;
p) PROJECT_ID=${OPTARG}
;;
t) TAG_NAME=${OPTARG}
;;
i) LAUNCHER_IMAGE_NAME=${OPTARG}
;;
\? ) echo "Usage: cmd [-p] project [-t] tag [-i] image"
exit
;;
esac
done
mkdir -p ./build
cp -R ./src/ ./build/
cp -R ../common/ ./build/
LOCAL_LAUNCHER_IMAGE_NAME=ml-pipeline-kubeflow-tfjob
docker build -t ${LOCAL_LAUNCHER_IMAGE_NAME} .
if [ -z "${TAG_NAME}" ]; then
TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
fi
if [ -z "${LAUNCHER_IMAGE_NAME}" ]; then
if [ -z "${PROJECT_ID}" ]; then
PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
fi
docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
docker push gcr.io/${PROJECT_ID}/${LOCAL_LAUNCHER_IMAGE_NAME}:${TAG_NAME}
else
docker tag ${LOCAL_LAUNCHER_IMAGE_NAME} ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
docker push ${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
fi
rm -rf ./build
| 503 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/launcher/sample.py | import json
import kfp.dsl as dsl
from kfp import components
from kfp.dsl.types import Integer
from typing import NamedTuple
def create_worker_spec(workerNum: int = 0) -> NamedTuple(
'CreateWorkerSpec',
[
('worker_spec', dict),
]):
"""
Creates tf-job worker spec
"""
worker = {}
if workerNum > 0:
worker = {
"replicas": workerNum ,
"restartPolicy": "OnFailure",
"template": {
"spec": {
"containers": [
{
"command": [
"python",
"/opt/model.py"
],
"args": [
"--tf-train-steps=60"
],
"image": "liuhougangxa/tf-estimator-mnist",
"name": "tensorflow",
}
]
}
}
}
from collections import namedtuple
worker_spec_output = namedtuple(
'MyWorkerOutput',
['worker_spec'])
return worker_spec_output(worker)
worker_spec_op = components.func_to_container_op(
create_worker_spec, base_image='tensorflow/tensorflow:1.11.0-py3')
@dsl.pipeline(
name="Launch kubeflow tfjob",
description="An example to launch tfjob."
)
def mnist_train(name: str="mnist",
namespace: str="kubeflow",
workerNum: int=3,
ttlSecondsAfterFinished: int=-1,
tfjobTimeoutMinutes: int=60,
deleteAfterDone=False):
tfjob_launcher_op = components.load_component_from_file("./component.yaml")
# tfjob_launcher_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/launcher/component.yaml')
chief = {
"replicas": 1,
"restartPolicy": "OnFailure",
"template": {
"spec": {
"containers": [
{
"command": [
"python",
"/opt/model.py"
],
"args": [
"--tf-train-steps=60"
],
"image": "liuhougangxa/tf-estimator-mnist",
"name": "tensorflow",
}
]
}
}
}
worker_spec_create = worker_spec_op(workerNum)
tfjob_launcher_op(
name=name,
namespace=namespace,
ttl_seconds_after_finished=ttlSecondsAfterFinished,
worker_spec=worker_spec_create.outputs['worker_spec'],
chief_spec=chief,
tfjob_timeout_minutes=tfjobTimeoutMinutes,
delete_finished_tfjob=deleteAfterDone
)
if __name__ == "__main__":
import kfp.compiler as compiler
compiler.Compiler().compile(mnist_train, __file__ + ".tar.gz") | 504 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/launcher/requirements.txt | pyyaml
kubernetes
| 505 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/launcher/component.yaml | name: Kubeflow - Launch TFJob
description: Kubeflow TFJob launcher
inputs:
- {name: Name, type: String, description: 'TFJob name.'}
- {name: Namespace, type: String, default: kubeflow, description: 'TFJob namespace.'}
- {name: Version, type: String, default: v1, description: 'TFJob version.'}
- {name: ActiveDeadlineSeconds, type: Integer, default: -1, description: 'Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always.'}
- {name: BackoffLimit, type: Integer, default: -1, description: 'Number of retries before marking this job as failed.'}
- {name: ttl Seconds After Finished, type: Integer, default: -1, description: 'Defines the TTL for cleaning up finished TFJobs.'}
- {name: CleanPodPolicy, type: String, default: Running, description: 'Defines the policy for cleaning up pods after the TFJob completes.'}
- {name: PS Spec, type: JsonObject, default: '{}', description: 'TFJob ps replicaSpecs.'}
- {name: Worker Spec, type: JsonObject, default: '{}', description: 'TFJob worker replicaSpecs.'}
- {name: Chief Spec, type: JsonObject, default: '{}', description: 'TFJob chief replicaSpecs.'}
- {name: Evaluator Spec, type: JsonObject, default: '{}', description: 'TFJob evaluator replicaSpecs.'}
- {name: Tfjob Timeout Minutes, type: Integer, default: 1440, description: 'Time in minutes to wait for the TFJob to complete.'}
- {name: Delete Finished Tfjob, type: Bool, default: 'True' , description: 'Whether to delete the tfjob after it is finished.'}
implementation:
container:
image: nikenano/launchernew:latest
command: [python, /ml/launch_tfjob.py]
args: [
--name, {inputValue: Name},
--namespace, {inputValue: Namespace},
--version, {inputValue: Version},
--activeDeadlineSeconds, {inputValue: ActiveDeadlineSeconds},
--backoffLimit, {inputValue: BackoffLimit},
--cleanPodPolicy, {inputValue: CleanPodPolicy},
--ttlSecondsAfterFinished, {inputValue: ttl Seconds After Finished},
--psSpec, {inputValue: PS Spec},
--workerSpec, {inputValue: Worker Spec},
--chiefSpec, {inputValue: Chief Spec},
--evaluatorSpec, {inputValue: Evaluator Spec},
--tfjobTimeoutMinutes, {inputValue: Tfjob Timeout Minutes},
--deleteAfterDone, {inputValue: Delete Finished Tfjob},
]
| 506 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/launcher/OWNERS | approvers:
- hougangliu
- gaocegege
- NikeNano
reviewers:
- hougangliu
| 507 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/launcher/Dockerfile | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.6
COPY requirements.txt .
RUN python3 -m pip install -r \
requirements.txt --quiet --no-cache-dir \
&& rm -f requirements.txt
ADD build /ml
ENTRYPOINT ["python", "/ml/launch_tfjob.py"]
| 508 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/launcher | kubeflow_public_repos/pipelines/components/kubeflow/launcher/src/__init__.py | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 509 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/launcher | kubeflow_public_repos/pipelines/components/kubeflow/launcher/src/launch_tfjob.py | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
from distutils.util import strtobool
import json
import os
import logging
import yaml
import launch_crd
from kubernetes import client as k8s_client
from kubernetes import config
def yamlOrJsonStr(string):
if string == "" or string is None:
return None
return yaml.safe_load(string)
TFJobGroup = "kubeflow.org"
TFJobPlural = "tfjobs"
class TFJob(launch_crd.K8sCR):
def __init__(self, version="v1", client=None):
super(TFJob, self).__init__(TFJobGroup, TFJobPlural, version, client)
def is_expected_conditions(self, inst, expected_conditions):
conditions = inst.get('status', {}).get("conditions")
if not conditions:
return False, ""
if conditions[-1]["type"] in expected_conditions and conditions[-1]["status"] == "True":
return True, conditions[-1]["type"]
else:
return False, conditions[-1]["type"]
def main(argv=None):
parser = argparse.ArgumentParser(description='Kubeflow TFJob launcher')
parser.add_argument('--name', type=str,
help='TFJob name.')
parser.add_argument('--namespace', type=str,
default='kubeflow',
help='TFJob namespace.')
parser.add_argument('--version', type=str,
default='v1',
help='TFJob version.')
parser.add_argument('--activeDeadlineSeconds', type=int,
default=-1,
help='Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always.')
parser.add_argument('--backoffLimit', type=int,
default=-1,
help='Number of retries before marking this job as failed.')
parser.add_argument('--cleanPodPolicy', type=str,
default="Running",
help='Defines the policy for cleaning up pods after the TFJob completes.')
parser.add_argument('--ttlSecondsAfterFinished', type=int,
default=-1,
help='Defines the TTL for cleaning up finished TFJobs.')
parser.add_argument('--psSpec', type=yamlOrJsonStr,
default={},
help='TFJob ps replicaSpecs.')
parser.add_argument('--workerSpec', type=yamlOrJsonStr,
default={},
help='TFJob worker replicaSpecs.')
parser.add_argument('--chiefSpec', type=yamlOrJsonStr,
default={},
help='TFJob chief replicaSpecs.')
parser.add_argument('--evaluatorSpec', type=yamlOrJsonStr,
default={},
help='TFJob evaluator replicaSpecs.')
parser.add_argument('--deleteAfterDone', type=strtobool,
default=True,
help='When tfjob done, delete the tfjob automatically if it is True.')
parser.add_argument('--tfjobTimeoutMinutes', type=int,
default=60*24,
help='Time in minutes to wait for the TFJob to reach end')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
logging.info('Generating tfjob template.')
config.load_incluster_config()
api_client = k8s_client.ApiClient()
tfjob = TFJob(version=args.version, client=api_client)
inst = {
"apiVersion": "%s/%s" % (TFJobGroup, args.version),
"kind": "TFJob",
"metadata": {
"name": args.name,
"namespace": args.namespace,
},
"spec": {
"cleanPodPolicy": args.cleanPodPolicy,
"tfReplicaSpecs": {
},
},
}
if args.ttlSecondsAfterFinished >=0:
inst["spec"]["ttlSecondsAfterFinished"] = args.ttlSecondsAfterFinished
if args.backoffLimit >= 0:
inst["spec"]["backoffLimit"] = args.backoffLimit
if args.activeDeadlineSeconds >=0:
inst["spec"]["activeDeadlineSecond"] = args.activeDeadlineSeconds
if args.psSpec:
inst["spec"]["tfReplicaSpecs"]["PS"] = args.psSpec
if args.chiefSpec:
inst["spec"]["tfReplicaSpecs"]["Chief"] = args.chiefSpec
if args.workerSpec:
inst["spec"]["tfReplicaSpecs"]["Worker"] = args.workerSpec
if args.evaluatorSpec:
inst["spec"]["tfReplicaSpecs"]["Evaluator"] = args.evaluatorSpec
create_response = tfjob.create(inst)
expected_conditions = ["Succeeded", "Failed"]
tfjob.wait_for_condition(
args.namespace, args.name, expected_conditions,
timeout=datetime.timedelta(minutes=args.tfjobTimeoutMinutes))
if args.deleteAfterDone:
tfjob.delete(args.name, args.namespace)
if __name__== "__main__":
main()
| 510 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher/build_image.sh | #!/bin/bash
LAUNCHER_IMAGE_NAME_DEFAULT=kubeflow-pytorchjob-launcher
while getopts ":hr:t:i:" opt; do
case "${opt}" in
h) echo "-r: repo name (including gcr.io/, etc., if not in Docker Hub)"
echo "-i: image name (default is $LAUNCHER_IMAGE_NAME_DEFAULT)"
echo "-t: image tag (default is inferred from date/git)"
exit
;;
r) REPO_NAME=${OPTARG}
;;
t) TAG_NAME=${OPTARG}
;;
i) LAUNCHER_IMAGE_NAME=${OPTARG}
;;
\? ) echo "Usage: cmd [-p] project [-t] tag [-i] image"
exit
;;
esac
done
# Apply defaults/interpret inputs
LAUNCHER_IMAGE_NAME=${LAUNCHER_IMAGE_NAME:-$LAUNCHER_IMAGE_NAME_DEFAULT}
TAG_NAME=${TAG_NAME:-$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)}
if [ -n "${REPO_NAME}" ]; then
# Ensure ends with /
if [[ "$REPO_NAME" != */ ]]; then
REPO_NAME+=/
fi
fi
FULL_NAME=${REPO_NAME}${LAUNCHER_IMAGE_NAME}:${TAG_NAME}
mkdir -p ./build
cp -R ./src/ ./build/
cp -R ../common/ ./build/
echo "Building image $FULL_NAME"
docker build -t ${FULL_NAME} .
echo "Pushing image $FULL_NAME"
docker push ${FULL_NAME}
rm -rf ./build
| 511 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher/sample.py | import json
from typing import NamedTuple
from collections import namedtuple
import kfp
import kfp.dsl as dsl
from kfp import components
from kfp.dsl.types import Integer
def get_current_namespace():
"""Returns current namespace if available, else kubeflow"""
try:
current_namespace = open(
"/var/run/secrets/kubernetes.io/serviceaccount/namespace"
).read()
except Exception:
current_namespace = "kubeflow"
return current_namespace
def create_worker_spec(
worker_num: int = 0
) -> NamedTuple(
"CreatWorkerSpec", [("worker_spec", dict)]
):
"""
Creates pytorch-job worker spec
"""
worker = {}
if worker_num > 0:
worker = {
"replicas": worker_num,
"restartPolicy": "OnFailure",
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"args": [
"--backend",
"gloo",
],
"image": "public.ecr.aws/pytorch-samples/pytorch_dist_mnist:latest",
"name": "pytorch",
"resources": {
"requests": {
"memory": "4Gi",
"cpu": "2000m",
# Uncomment for GPU
# "nvidia.com/gpu": 1,
},
"limits": {
"memory": "4Gi",
"cpu": "2000m",
# Uncomment for GPU
# "nvidia.com/gpu": 1,
},
},
}
]
},
},
}
worker_spec_output = namedtuple(
"MyWorkerOutput", ["worker_spec"]
)
return worker_spec_output(worker)
worker_spec_op = components.func_to_container_op(
create_worker_spec,
base_image="python:slim",
)
@dsl.pipeline(
name="launch-kubeflow-pytorchjob",
description="An example to launch pytorch.",
)
def mnist_train(
namespace: str = get_current_namespace(),
worker_replicas: int = 1,
ttl_seconds_after_finished: int = -1,
job_timeout_minutes: int = 600,
delete_after_done: bool = False,
):
pytorchjob_launcher_op = components.load_component_from_file(
"./component.yaml"
)
master = {
"replicas": 1,
"restartPolicy": "OnFailure",
"template": {
"metadata": {
"annotations": {
# See https://github.com/kubeflow/website/issues/2011
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
# To override default command
# "command": [
# "python",
# "/opt/mnist/src/mnist.py"
# ],
"args": [
"--backend",
"gloo",
],
# Or, create your own image from
# https://github.com/kubeflow/pytorch-operator/tree/master/examples/mnist
"image": "public.ecr.aws/pytorch-samples/pytorch_dist_mnist:latest",
"name": "pytorch",
"resources": {
"requests": {
"memory": "4Gi",
"cpu": "2000m",
# Uncomment for GPU
# "nvidia.com/gpu": 1,
},
"limits": {
"memory": "4Gi",
"cpu": "2000m",
# Uncomment for GPU
# "nvidia.com/gpu": 1,
},
},
}
],
# If imagePullSecrets required
# "imagePullSecrets": [
# {"name": "image-pull-secret"},
# ],
},
},
}
worker_spec_create = worker_spec_op(
worker_replicas
)
# Launch and monitor the job with the launcher
pytorchjob_launcher_op(
# Note: name needs to be a unique pytorchjob name in the namespace.
# Using RUN_ID_PLACEHOLDER is one way of getting something unique.
name=f"name-{kfp.dsl.RUN_ID_PLACEHOLDER}",
namespace=namespace,
master_spec=master,
# pass worker_spec as a string because the JSON serializer will convert
# the placeholder for worker_replicas (which it sees as a string) into
# a quoted variable (eg a string) instead of an unquoted variable
# (number). If worker_replicas is quoted in the spec, it will break in
# k8s. See https://github.com/kubeflow/pipelines/issues/4776
worker_spec=worker_spec_create.outputs[
"worker_spec"
],
ttl_seconds_after_finished=ttl_seconds_after_finished,
job_timeout_minutes=job_timeout_minutes,
delete_after_done=delete_after_done,
)
if __name__ == "__main__":
import kfp.compiler as compiler
pipeline_file = "test.tar.gz"
print(
f"Compiling pipeline as {pipeline_file}"
)
compiler.Compiler().compile(
mnist_train, pipeline_file
)
# # To run:
# client = kfp.Client()
# run = client.create_run_from_pipeline_package(
# pipeline_file,
# arguments={},
# run_name="test pytorchjob run"
# )
# print(f"Created run {run}")
| 512 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher/requirements.txt | pyyaml
kubernetes
kubeflow-pytorchjob
retrying
| 513 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher/component.yaml | description: Kubeflow PyTorchJob launcher
inputs:
- {name: name, type: String, description: 'PyTorchJob name.'}
- {name: namespace, type: String, default: kubeflow, description: 'PyTorchJob namespace (likely your current namespace).'}
- {name: version, type: String, default: v1, description: 'PyTorchJob version.'}
- {name: master_spec, type: JsonObject, default: '{}', description: 'PyTorchJob Master replicaSpecs.'}
- {name: worker_spec, type: JsonObject, default: '{}', description: 'PyTorchJob Worker replicaSpecs.'}
- {name: job_timeout_minutes, type: Integer, default: 1440, description: 'Time in minutes to wait for the job to complete.'}
- {name: delete_after_done, type: Boolean, default: 'True' , description: 'Whether to delete the job after it is finished.'}
- {name: clean_pod_policy, type: String, default: Running, description: 'Defines the policy for cleaning up pods after the PyTorchJob completes.'}
- {name: active_deadline_seconds, type: Integer, optional: true, description: 'Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always.'}
- {name: backoff_limit, type: Integer, optional: true, description: 'Number of retries before marking this job as failed.'}
- {name: ttl_seconds_after_finished, type: Integer, optional: true, description: 'Defines the TTL for cleaning up finished PyTorchJobs.'}
implementation:
container:
image: cascribner/kubeflow-pytorchjob-launcher:v1
command: [python, /ml/launch_pytorchjob.py]
args:
- --name
- {inputValue: name}
- --namespace
- {inputValue: namespace}
- --version
- {inputValue: version}
- --masterSpec
- {inputValue: master_spec}
- --workerSpec
- {inputValue: worker_spec}
- --jobTimeoutMinutes
- {inputValue: job_timeout_minutes}
- --deleteAfterDone
- {inputValue: delete_after_done}
- --cleanPodPolicy
- {inputValue: clean_pod_policy}
- if:
cond: {isPresent: active_deadline_seconds}
then:
- --activeDeadlineSeconds
- {inputValue: active_deadline_seconds}
- if:
cond: {isPresent: backoff_limit}
then:
- --backoffLimit
- {inputValue: backoff_limit}
- if:
cond: {isPresent: ttl_seconds_after_finished}
then:
- --ttlSecondsAfterFinished
- {inputValue: ttl_seconds_after_finished}
| 514 |
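The pipeline in `sample.py` above exercises the required inputs; the three optional inputs (`active_deadline_seconds`, `backoff_limit`, `ttl_seconds_after_finished`) only become CLI flags when present, via the `isPresent` conditions in the component YAML. A hedged sketch of passing them explicitly is shown below; the job name, numeric values, and empty replica specs are placeholders, not recommendations.

```python
import kfp.dsl as dsl
from kfp import components

# Load the launcher component from the YAML shown above.
pytorchjob_launcher_op = components.load_component_from_file("./component.yaml")

@dsl.pipeline(name="pytorchjob-launcher-optional-args")
def pipeline_with_optional_args(namespace: str = "kubeflow"):
    pytorchjob_launcher_op(
        name="demo-pytorchjob",   # placeholder; must be unique in the namespace
        namespace=namespace,
        master_spec={},           # see sample.py above for a realistic master spec
        worker_spec={},
        delete_after_done=True,
        # Optional inputs: omitting an argument simply emits no flag at all.
        active_deadline_seconds=3600,
        backoff_limit=3,
        ttl_seconds_after_finished=600,
    )
```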
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher/Dockerfile | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.6
ADD requirements.txt requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
ADD build /ml
ENTRYPOINT ["python", "/ml/launch_pytorchjob.py"]
| 515 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher/src/launch_pytorchjob.py | import argparse
import datetime
from distutils.util import strtobool
import logging
import yaml
from kubernetes import client as k8s_client
from kubernetes import config
import launch_crd
from kubeflow.pytorchjob import V1PyTorchJob as V1PyTorchJob_original
from kubeflow.pytorchjob import V1PyTorchJobSpec as V1PyTorchJobSpec_original
def yamlOrJsonStr(string):
if string == "" or string is None:
return None
return yaml.safe_load(string)
def get_current_namespace():
"""Returns current namespace if available, else kubeflow"""
try:
namespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
current_namespace = open(namespace).read()
except FileNotFoundError:
current_namespace = "kubeflow"
return current_namespace
# Patch PyTorchJob APIs to align with k8s usage
class V1PyTorchJob(V1PyTorchJob_original):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.openapi_types = self.swagger_types
class V1PyTorchJobSpec(V1PyTorchJobSpec_original):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.openapi_types = self.swagger_types
def get_arg_parser():
parser = argparse.ArgumentParser(description='Kubeflow Job launcher')
parser.add_argument('--name', type=str,
default="pytorchjob",
help='Job name.')
parser.add_argument('--namespace', type=str,
default=get_current_namespace(),
help='Job namespace.')
parser.add_argument('--version', type=str,
default='v1',
help='Job version.')
parser.add_argument('--activeDeadlineSeconds', type=int,
default=None,
help='Specifies the duration (in seconds) since startTime during which the job can remain active before it is terminated. Must be a positive integer. This setting applies only to pods where restartPolicy is OnFailure or Always.')
parser.add_argument('--backoffLimit', type=int,
default=None,
help='Number of retries before marking this job as failed.')
parser.add_argument('--cleanPodPolicy', type=str,
default="Running",
help='Defines the policy for cleaning up pods after the Job completes.')
parser.add_argument('--ttlSecondsAfterFinished', type=int,
default=None,
help='Defines the TTL for cleaning up finished Jobs.')
parser.add_argument('--masterSpec', type=yamlOrJsonStr,
default={},
help='Job master replicaSpecs.')
parser.add_argument('--workerSpec', type=yamlOrJsonStr,
default={},
help='Job worker replicaSpecs.')
parser.add_argument('--deleteAfterDone', type=strtobool,
default=True,
help='When Job done, delete the Job automatically if it is True.')
parser.add_argument('--jobTimeoutMinutes', type=int,
default=60*24,
help='Time in minutes to wait for the Job to reach end')
# Options that likely wont be used, but left here for future use
parser.add_argument('--jobGroup', type=str,
default="kubeflow.org",
help='Group for the CRD, ex: kubeflow.org')
parser.add_argument('--jobPlural', type=str,
default="pytorchjobs", # We could select a launcher here and populate these automatically
help='Plural name for the CRD, ex: pytorchjobs')
parser.add_argument('--kind', type=str,
default='PyTorchJob',
help='CRD kind.')
return parser
def main(args):
logging.getLogger().setLevel(logging.INFO)
logging.info('Generating job template.')
jobSpec = V1PyTorchJobSpec(
pytorch_replica_specs={
'Master': args.masterSpec,
'Worker': args.workerSpec,
},
active_deadline_seconds=args.activeDeadlineSeconds,
backoff_limit=args.backoffLimit,
clean_pod_policy=args.cleanPodPolicy,
ttl_seconds_after_finished=args.ttlSecondsAfterFinished,
)
api_version = f"{args.jobGroup}/{args.version}"
job = V1PyTorchJob(
api_version=api_version,
kind=args.kind,
metadata=k8s_client.V1ObjectMeta(
name=args.name,
namespace=args.namespace,
),
spec=jobSpec,
)
serialized_job = k8s_client.ApiClient().sanitize_for_serialization(job)
logging.info('Creating launcher client.')
config.load_incluster_config()
api_client = k8s_client.ApiClient()
launcher_client = launch_crd.K8sCR(
group=args.jobGroup,
plural=args.jobPlural,
version=args.version,
client=api_client
)
logging.info('Submitting CR.')
create_response = launcher_client.create(serialized_job)
expected_conditions = ["Succeeded", "Failed"]
logging.info(
f'Monitoring job until status is any of {expected_conditions}.'
)
launcher_client.wait_for_condition(
args.namespace, args.name, expected_conditions,
timeout=datetime.timedelta(minutes=args.jobTimeoutMinutes))
if args.deleteAfterDone:
logging.info('Deleting job.')
launcher_client.delete(args.name, args.namespace)
if __name__ == "__main__":
parser = get_arg_parser()
args = parser.parse_args()
main(args)
| 516 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher | kubeflow_public_repos/pipelines/components/kubeflow/pytorch-launcher/src/__init__.py | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 517 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/kfserving/README.md | # KFServing Component
This is the Kubeflow Pipelines component for KFServing. This uses the [V1beta1 API](https://github.com/kubeflow/kfserving/blob/master/docs/apis/v1beta1/README.md),
so your cluster must have a KFServing version >= v0.5.0 in order to use this.
If you are using a KFServing version prior to v0.5.0, you must use the older, deprecated version of the KFServing Pipelines component,
which can be found at [this commit](https://github.com/kubeflow/pipelines/tree/65bed9b6d1d676ef2d541a970d3edc0aee12400d/components/kubeflow/kfserving).
Sample usage of this component can be found [here](https://github.com/kubeflow/kfserving/blob/master/docs/samples/pipelines/kfs-pipeline-v1alpha2.ipynb).
## Usage
Load the component with:
```python
import kfp.dsl as dsl
import kfp
from kfp import components
kfserving_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/kfserving/component.yaml')
```
**Note**: To use the previous version of this component which uses the v1alpha2 API and KFServing 0.4.1, then load the following YAML instead:
```yaml
https://raw.githubusercontent.com/kubeflow/pipelines/65bed9b6d1d676ef2d541a970d3edc0aee12400d/components/kubeflow/kfserving/component.yaml
```
### Arguments
| Argument | Default | Description |
|----------|---------|-------------|
| action | `create` | Action to execute on KFServing. Available options are `create`, `update`, `apply`, and `delete`. Note: `apply` is equivalent to `update` if the resource exists and `create` if not. |
| model_name | | Name to give to the deployed model/InferenceService |
| model_uri | | Path of the S3 or GCS compatible directory containing the model. |
| canary_traffic_percent | `100` | The traffic split percentage between the candidate model and the last ready model |
| namespace | | Kubernetes namespace where the KFServing service is deployed. If no namespace is provided, `anonymous` will be used unless a namespace is provided in the `inferenceservice_yaml` argument. |
| framework | | Machine learning framework for model serving. Currently the supported frameworks are `tensorflow`, `pytorch`, `sklearn`, `xgboost`, `onnx`, `triton`, `pmml`, and `lightgbm`. |
| custom_model_spec | `{}` | Custom model runtime container spec in JSON. Sample spec: `{"image": "codait/max-object-detector", "port":5000, "name": "test-container"}` |
| inferenceservice_yaml | `{}` | Raw InferenceService serialized YAML for deployment. Use this if you need additional configurations for your InferenceService. |
| autoscaling_target | `0` | Autoscaling Target Number. If not 0, sets the following annotation on the InferenceService: `autoscaling.knative.dev/target` |
| service_account | | ServiceAccount to use to run the InferenceService pod. |
| enable_istio_sidecar | `True` | Whether to enable istio sidecar injection. |
| watch_timeout | `300` | Timeout in seconds for watching until the InferenceService becomes ready. |
| min_replicas | `-1` | Minimum number of InferenceService replicas. Default of -1 just delegates to pod default of 1. |
| max_replicas | `-1` | Maximum number of InferenceService replicas. |
### Basic InferenceService Creation
The following will use the KFServing component to deploy a TensorFlow model.
```python
@dsl.pipeline(
name='KFServing Pipeline',
description='A pipeline for KFServing.'
)
def kfserving_pipeline():
kfserving_op(
action='apply',
model_name='tf-sample',
model_uri='gs://kfserving-samples/models/tensorflow/flowers',
framework='tensorflow',
)
kfp.Client().create_run_from_pipeline_func(kfserving_pipeline, arguments={})
```
Sample op for deploying a PyTorch model:
```python
kfserving_op(
action='apply',
model_name='pytorch-test',
model_uri='gs://kfserving-examples/models/torchserve/image_classifier',
framework='pytorch'
)
```
### Canary Rollout
Ensure you have an initial model deployed with 100 percent traffic with something like:
```python
kfserving_op(
action = 'apply',
model_name='tf-sample',
model_uri='gs://kfserving-samples/models/tensorflow/flowers',
framework='tensorflow',
)
```
Deploy the candidate model which will only get a portion of traffic:
```python
kfserving_op(
action='apply',
model_name='tf-sample',
model_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
framework='tensorflow',
canary_traffic_percent='10'
)
```
To promote the candidate model, you can either set `canary_traffic_percent` to `100` or simply remove it, then re-run the pipeline:
```python
kfserving_op(
action='apply',
model_name='tf-sample',
model_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
framework='tensorflow'
)
```
If you instead want to rollback the candidate model, then set `canary_traffic_percent` to `0`, then re-run the pipeline:
```python
kfserving_op(
action='apply',
model_name='tf-sample',
model_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
framework='tensorflow',
canary_traffic_percent='0'
)
```
### Deletion
To delete a model, simply set the `action` to `'delete'` and pass in the InferenceService name:
```python
kfserving_op(
action='delete',
model_name='tf-sample'
)
```
### Custom Runtime
To pass in a custom model serving runtime, use the `custom_model_spec` argument. Currently,
the expected format for `custom_model_spec` is:
```json
{
"image": "some_image",
"port": "port_number",
"name": "custom-container",
"env" : [{ "name": "some_name", "value": "some_value"}],
"resources": { "requests": {}, "limits": {}}
}
```
Sample deployment:
```python
container_spec = '{ "image": "codait/max-object-detector", "port":5000, "name": "custom-container"}'
kfserving_op(
action='apply',
model_name='custom-simple',
custom_model_spec=container_spec
)
```
### Deploy using InferenceService YAML
If you need more fine-grained configuration, there is the option to deploy using an InferenceService YAML file:
```python
isvc_yaml = '''
apiVersion: "serving.kubeflow.org/v1beta1"
kind: "InferenceService"
metadata:
name: "sklearn-iris"
namespace: "anonymous"
spec:
predictor:
sklearn:
storageUri: "gs://kfserving-samples/models/sklearn/iris"
'''
kfserving_op(
action='apply',
inferenceservice_yaml=isvc_yaml
)
```
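### Replica and Autoscaling Settings
The `min_replicas`, `max_replicas`, and `autoscaling_target` arguments listed in the table above can be combined with any of the deployments shown earlier. A sketch reusing the TensorFlow sample from this README (the replica counts and autoscaling target are placeholder values):
```python
kfserving_op(
    action='apply',
    model_name='tf-sample',
    model_uri='gs://kfserving-samples/models/tensorflow/flowers',
    framework='tensorflow',
    min_replicas='1',
    max_replicas='3',
    autoscaling_target='10'
)
```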
| 518 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/kfserving/requirements.txt | kubernetes==12.0.0
kfserving==0.5.1
| 519 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/kfserving/component.yaml | name: Kubeflow - Serve Model using KFServing
description: Serve Models using Kubeflow KFServing
inputs:
- {name: Action, type: String, default: 'create', description: 'Action to execute on KFServing'}
- {name: Model Name, type: String, default: '', description: 'Name to give to the deployed model'}
- {name: Model URI, type: String, default: '', description: 'Path of the S3 or GCS compatible directory containing the model.'}
- {name: Canary Traffic Percent, type: String, default: '100', description: 'The traffic split percentage between the candidate model and the last ready model'}
- {name: Namespace, type: String, default: '', description: 'Kubernetes namespace where the KFServing service is deployed.'}
- {name: Framework, type: String, default: '', description: 'Machine Learning Framework for Model Serving.'}
- {name: Custom Model Spec, type: String, default: '{}', description: 'Custom model runtime container spec in JSON'}
- {name: Autoscaling Target, type: String, default: '0', description: 'Autoscaling Target Number'}
- {name: Service Account, type: String, default: '', description: 'ServiceAccount to use to run the InferenceService pod'}
- {name: Enable Istio Sidecar, type: Bool, default: 'True', description: 'Whether to enable istio sidecar injection'}
- {name: InferenceService YAML, type: String, default: '{}', description: 'Raw InferenceService serialized YAML for deployment'}
- {name: Watch Timeout, type: String, default: '300', description: "Timeout seconds for watching until InferenceService becomes ready."}
- {name: Min Replicas, type: String, default: '-1', description: 'Minimum number of InferenceService replicas'}
- {name: Max Replicas, type: String, default: '-1', description: 'Maximum number of InferenceService replicas'}
- {name: Request Timeout, type: String, default: '60', description: "Specifies the number of seconds to wait before timing out a request to the component."}
outputs:
- {name: InferenceService Status, type: String, description: 'Status JSON output of InferenceService'}
implementation:
container:
image: quay.io/aipipeline/kfserving-component:v0.5.1
command: ['python']
args: [
-u, kfservingdeployer.py,
--action, {inputValue: Action},
--model-name, {inputValue: Model Name},
--model-uri, {inputValue: Model URI},
--canary-traffic-percent, {inputValue: Canary Traffic Percent},
--namespace, {inputValue: Namespace},
--framework, {inputValue: Framework},
--custom-model-spec, {inputValue: Custom Model Spec},
--autoscaling-target, {inputValue: Autoscaling Target},
--service-account, {inputValue: Service Account},
--enable-istio-sidecar, {inputValue: Enable Istio Sidecar},
--output-path, {outputPath: InferenceService Status},
--inferenceservice-yaml, {inputValue: InferenceService YAML},
--watch-timeout, {inputValue: Watch Timeout},
--min-replicas, {inputValue: Min Replicas},
--max-replicas, {inputValue: Max Replicas},
--request-timeout, {inputValue: Request Timeout}
]
| 520 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/kfserving/OWNERS | approvers:
- animeshsingh
- Tomcli
reviewers:
- animeshsingh
- Tomcli
- yhwang
| 521 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/kfserving/Dockerfile | FROM python:3.6-slim
COPY requirements.txt .
RUN python3 -m pip install -r \
requirements.txt --quiet --no-cache-dir \
&& rm -f requirements.txt
ENV APP_HOME /app
COPY src $APP_HOME
WORKDIR $APP_HOME
ENTRYPOINT ["python"]
CMD ["kfservingdeployer.py"]
| 522 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/kfserving | kubeflow_public_repos/pipelines/components/kubeflow/kfserving/src/kfservingdeployer.py | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from distutils.util import strtobool
import json
import os
import sys
import time
import yaml
from kubernetes import client
from kfserving import constants
from kfserving import KFServingClient
from kfserving import V1beta1InferenceService
from kfserving import V1beta1InferenceServiceSpec
from kfserving import V1beta1LightGBMSpec
from kfserving import V1beta1ONNXRuntimeSpec
from kfserving import V1beta1PMMLSpec
from kfserving import V1beta1PredictorSpec
from kfserving import V1beta1SKLearnSpec
from kfserving import V1beta1TFServingSpec
from kfserving import V1beta1TorchServeSpec
from kfserving import V1beta1TritonSpec
from kfserving import V1beta1XGBoostSpec
from kfserving.api.kf_serving_watch import isvc_watch
AVAILABLE_FRAMEWORKS = {
'tensorflow': V1beta1TFServingSpec,
'pytorch': V1beta1TorchServeSpec,
'sklearn': V1beta1SKLearnSpec,
'xgboost': V1beta1XGBoostSpec,
'onnx': V1beta1ONNXRuntimeSpec,
'triton': V1beta1TritonSpec,
'pmml': V1beta1PMMLSpec,
'lightgbm': V1beta1LightGBMSpec
}
def create_predictor_spec(framework, storage_uri, canary_traffic_percent,
service_account, min_replicas, max_replicas, containers, request_timeout):
"""
Create and return V1beta1PredictorSpec to be used in a V1beta1InferenceServiceSpec
object.
"""
predictor_spec = V1beta1PredictorSpec(
service_account_name=service_account,
min_replicas=(min_replicas
if min_replicas >= 0
else None
),
max_replicas=(max_replicas
if max_replicas > 0 and max_replicas >= min_replicas
else None
),
containers=(containers or None),
canary_traffic_percent=canary_traffic_percent,
timeout=request_timeout
)
# If the containers field was set, then this is custom model serving.
if containers:
return predictor_spec
if framework not in AVAILABLE_FRAMEWORKS:
raise ValueError("Error: No matching framework: " + framework)
setattr(
predictor_spec,
framework,
AVAILABLE_FRAMEWORKS[framework](storage_uri=storage_uri)
)
return predictor_spec
def create_custom_container_spec(custom_model_spec):
"""
Given a JSON container spec, return a V1Container object
representing the container. This is used for passing in
custom server images. The expected format for the input is:
{ "image": "test/containerimage",
"port":5000,
"name": "custom-container" }
"""
env = (
[
client.V1EnvVar(name=i["name"], value=i["value"])
for i in custom_model_spec["env"]
]
if custom_model_spec.get("env", "")
else None
)
ports = (
[client.V1ContainerPort(container_port=int(custom_model_spec.get("port", "")), protocol="TCP")]
if custom_model_spec.get("port", "")
else None
)
resources = (
client.V1ResourceRequirements(
requests=(custom_model_spec["resources"]["requests"]
if custom_model_spec.get('resources', {}).get('requests')
else None
),
limits=(custom_model_spec["resources"]["limits"]
if custom_model_spec.get('resources', {}).get('limits')
else None
),
)
if custom_model_spec.get("resources", {})
else None
)
return client.V1Container(
name=custom_model_spec.get("name", "custom-container"),
image=custom_model_spec["image"],
env=env,
ports=ports,
command=custom_model_spec.get("command", None),
args=custom_model_spec.get("args", None),
image_pull_policy=custom_model_spec.get("image_pull_policy", None),
working_dir=custom_model_spec.get("working_dir", None),
resources=resources
)
def create_inference_service(metadata, predictor_spec):
"""
Build and return V1beta1InferenceService object.
"""
return V1beta1InferenceService(
api_version=constants.KFSERVING_V1BETA1,
kind=constants.KFSERVING_KIND,
metadata=metadata,
spec=V1beta1InferenceServiceSpec(
predictor=predictor_spec
),
)
def submit_api_request(kfs_client, action, name, isvc, namespace=None,
watch=False, timeout_seconds=300):
"""
Creates or updates a Kubernetes custom object. This code is borrowed from the
KFServingClient.create/patch methods as using those directly doesn't allow for
sending in dicts as the InferenceService object which is needed for supporting passing
in raw InferenceService serialized YAML.
"""
custom_obj_api = kfs_client.api_instance
args = [constants.KFSERVING_GROUP,constants.KFSERVING_V1BETA1_VERSION,
namespace, constants.KFSERVING_PLURAL]
if action == 'update':
outputs = custom_obj_api.patch_namespaced_custom_object(*args, name, isvc)
else:
outputs = custom_obj_api.create_namespaced_custom_object(*args, isvc)
if watch:
# Sleep 3 seconds so we don't read a stale Ready status from before this request.
time.sleep(3)
isvc_watch(
name=outputs['metadata']['name'],
namespace=namespace,
timeout_seconds=timeout_seconds)
else:
return outputs
def perform_action(action, model_name, model_uri, canary_traffic_percent, namespace,
framework, custom_model_spec, service_account, inferenceservice_yaml,
request_timeout, autoscaling_target=0, enable_istio_sidecar=True,
watch_timeout=300, min_replicas=0, max_replicas=0):
"""
Perform the specified action. If the action is not 'delete' and `inferenceService_yaml`
was provided, the dict representation of the YAML will be sent directly to the
Kubernetes API. Otherwise, a V1beta1InferenceService object will be built using the
provided input and then sent for creation/update.
:return InferenceService JSON output
"""
kfs_client = KFServingClient()
if inferenceservice_yaml:
        # Overwrite the name and namespace if they were provided
if namespace:
inferenceservice_yaml['metadata']['namespace'] = namespace
if model_name:
inferenceservice_yaml['metadata']['name'] = model_name
else:
model_name = inferenceservice_yaml['metadata']['name']
kfsvc = inferenceservice_yaml
elif action != 'delete':
# Create annotations
annotations = {}
if int(autoscaling_target) != 0:
annotations["autoscaling.knative.dev/target"] = str(autoscaling_target)
if not enable_istio_sidecar:
annotations["sidecar.istio.io/inject"] = 'false'
if not annotations:
annotations = None
metadata = client.V1ObjectMeta(
name=model_name, namespace=namespace, annotations=annotations
)
# If a custom model container spec was provided, build the V1Container
# object using it.
containers = []
if custom_model_spec:
containers = [create_custom_container_spec(custom_model_spec)]
# Build the V1beta1PredictorSpec.
predictor_spec = create_predictor_spec(
framework, model_uri, canary_traffic_percent, service_account,
min_replicas, max_replicas, containers, request_timeout
)
kfsvc = create_inference_service(metadata, predictor_spec)
if action == "create":
submit_api_request(kfs_client, 'create', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "update":
submit_api_request(kfs_client, 'update', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "apply":
try:
submit_api_request(kfs_client, 'create', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
except Exception:
submit_api_request(kfs_client, 'update', model_name, kfsvc, namespace,
watch=True, timeout_seconds=watch_timeout)
elif action == "delete":
kfs_client.delete(model_name, namespace=namespace)
else:
raise ("Error: No matching action: " + action)
model_status = kfs_client.get(model_name, namespace=namespace)
return model_status
def main():
"""
This parses arguments passed in from the CLI and performs the corresponding action.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--action", type=str, help="Action to execute on KFServing", default="create"
)
parser.add_argument(
"--model-name", type=str, help="Name to give to the deployed model"
)
parser.add_argument(
"--model-uri",
type=str,
help="Path of the S3, GCS or PVC directory containing the model",
)
parser.add_argument(
"--canary-traffic-percent",
type=str,
help="The traffic split percentage between the candidate model and the last ready model",
default="100",
)
parser.add_argument(
"--namespace",
type=str,
help="Kubernetes namespace where the KFServing service is deployed",
default="",
)
parser.add_argument(
"--framework",
type=str,
help="Model serving framework to use. Available frameworks: " +
str(list(AVAILABLE_FRAMEWORKS.keys())),
default=""
)
parser.add_argument(
"--custom-model-spec",
type=json.loads,
help="The container spec for a custom model runtime",
default="{}",
)
parser.add_argument(
"--autoscaling-target", type=str, help="Autoscaling target number", default="0"
)
parser.add_argument(
"--service-account",
type=str,
help="Service account containing s3 credentials",
default="",
)
parser.add_argument(
"--enable-istio-sidecar",
type=strtobool,
help="Whether to inject istio sidecar",
default="True"
)
parser.add_argument(
"--inferenceservice-yaml",
type=yaml.safe_load,
help="Raw InferenceService serialized YAML for deployment",
default="{}"
)
parser.add_argument("--output-path", type=str, help="Path to store URI output")
parser.add_argument("--watch-timeout",
type=str,
help="Timeout seconds for watching until InferenceService becomes ready.",
default="300")
parser.add_argument(
"--min-replicas", type=str, help="Minimum number of replicas", default="-1"
)
parser.add_argument(
"--max-replicas", type=str, help="Maximum number of replicas", default="-1"
)
parser.add_argument("--request-timeout",
type=str,
help="Specifies the number of seconds to wait before timing out a request to the component.",
default="60")
args = parser.parse_args()
action = args.action.lower()
model_name = args.model_name
model_uri = args.model_uri
canary_traffic_percent = int(args.canary_traffic_percent)
namespace = args.namespace
framework = args.framework.lower()
output_path = args.output_path
custom_model_spec = args.custom_model_spec
autoscaling_target = int(args.autoscaling_target)
service_account = args.service_account
enable_istio_sidecar = args.enable_istio_sidecar
inferenceservice_yaml = args.inferenceservice_yaml
watch_timeout = int(args.watch_timeout)
min_replicas = int(args.min_replicas)
max_replicas = int(args.max_replicas)
request_timeout = int(args.request_timeout)
# Default the namespace.
if not namespace:
namespace = 'anonymous'
# If no namespace was provided, but one is listed in the YAML, use that.
if inferenceservice_yaml and inferenceservice_yaml.get('metadata', {}).get('namespace'):
namespace = inferenceservice_yaml['metadata']['namespace']
# Only require model name when an Isvc YAML was not provided.
if not inferenceservice_yaml and not model_name:
parser.error('{} argument is required when performing "{}" action'.format(
'model_name', action
))
# If the action isn't a delete, require 'model-uri' and 'framework' only if an Isvc YAML
# or custom model container spec are not provided.
if action != 'delete':
if not inferenceservice_yaml and not custom_model_spec and not (model_uri and framework):
parser.error('Arguments for {} and {} are required when performing "{}" action'.format(
'model_uri', 'framework', action
))
model_status = perform_action(
action=action,
model_name=model_name,
model_uri=model_uri,
canary_traffic_percent=canary_traffic_percent,
namespace=namespace,
framework=framework,
custom_model_spec=custom_model_spec,
autoscaling_target=autoscaling_target,
service_account=service_account,
enable_istio_sidecar=enable_istio_sidecar,
inferenceservice_yaml=inferenceservice_yaml,
request_timeout=request_timeout,
watch_timeout=watch_timeout,
min_replicas=min_replicas,
max_replicas=max_replicas
)
print(model_status)
if action != 'delete':
# Check whether the model is ready
for condition in model_status["status"]["conditions"]:
if condition['type'] == 'Ready':
if condition['status'] == 'True':
print('Model is ready\n')
break
print('Model is timed out, please check the InferenceService events for more details.')
sys.exit(1)
try:
print( model_status["status"]["url"] + " is the Knative domain.")
print("Sample test commands: \n")
# model_status['status']['url'] is like http://flowers-sample.kubeflow.example.com/v1/models/flowers-sample
print("curl -v -X GET %s" % model_status["status"]["url"])
print("\nIf the above URL is not accessible, it's recommended to setup Knative with a configured DNS.\n"\
"https://knative.dev/docs/install/installing-istio/#configuring-dns")
except Exception:
print("Model is not ready, check the logs for the Knative URL status.")
sys.exit(1)
if output_path:
try:
            # Remove some less important fields to reduce the output size.
del model_status['metadata']['managedFields']
del model_status['status']['conditions']
if sys.getsizeof(model_status) > 3000:
del model_status['components']['predictor']['address']['url']
del model_status['components']['predictor']['latestCreatedRevision']
del model_status['components']['predictor']['latestReadyRevision']
del model_status['components']['predictor']['latestRolledoutRevision']
del model_status['components']['predictor']['url']
del model_status['spec']
except KeyError:
pass
if not os.path.exists(os.path.dirname(output_path)):
os.makedirs(os.path.dirname(output_path))
with open(output_path, "w") as report:
report.write(json.dumps(model_status, indent=4))
if __name__ == "__main__":
main()
| 523 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher/build_image.sh | #!/bin/bash -e
# Copyright 2020 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
IMAGE="docker.io/kubeflowkatib/kubeflow-pipelines-launcher"
echo "Releasing image for the Katib Pipeline Launcher..."
echo -e "Image: ${IMAGE}\n"
docker build . -f Dockerfile -t ${IMAGE}
docker push ${IMAGE}
| 524 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher/requirements.txt | kubernetes==10.0.1
kubeflow-katib==0.10.1
| 525 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher/component.yaml | name: Katib - Launch Experiment
description: Katib Experiment launcher
inputs:
- {name: Experiment Name, type: String, default: '', description: 'Experiment name'}
- {name: Experiment Namespace, type: String, default: anonymous, description: 'Experiment namespace'}
- {name: Experiment Spec, type: JsonObject, default: '{}', description: 'Experiment specification in dict format'}
- {name: Experiment Timeout Minutes, type: Integer, default: 1440, description: 'Time in minutes to wait for the Experiment to complete'}
- {name: Delete Finished Experiment, type: Bool, default: 'True', description: 'Whether to delete the Experiment after it is finished'}
outputs:
- {name: Best Parameter Set, type: JsonObject, description: 'The hyperparameter set of the best Experiment Trial'}
implementation:
container:
image: docker.io/kubeflowkatib/kubeflow-pipelines-launcher
command: [python, src/launch_experiment.py]
args: [
--experiment-name, {inputValue: Experiment Name},
--experiment-namespace, {inputValue: Experiment Namespace},
--experiment-spec, {inputValue: Experiment Spec},
--experiment-timeout-minutes, {inputValue: Experiment Timeout Minutes},
--delete-after-done, {inputValue: Delete Finished Experiment},
--output-file, {outputPath: Best Parameter Set},
]
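# Note (illustrative): the Experiment Spec input is the serialized spec of a Katib
# V1beta1Experiment, i.e. a dict that the launcher deserializes into a
# V1beta1ExperimentSpec. A sketch of producing it with the Katib SDK pinned in
# requirements.txt (method name assumed from the generated OpenAPI client):
#   from kubeflow.katib import ApiClient
#   experiment_spec_dict = ApiClient().sanitize_for_serialization(experiment.spec)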
| 526 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher/OWNERS | approvers:
- andreyvelich
- gaocegege
- hougangliu
- johnugeorge
| 527 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher/Dockerfile | # Copyright 2020 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.6
ENV APP_HOME /app
WORKDIR ${APP_HOME}
ADD . ${APP_HOME}
RUN pip install --no-cache-dir -r requirements.txt
ENTRYPOINT ["python", "src/launch_experiment.py"]
| 528 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher/src/__init__.py | # Copyright 2020 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 529 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher | kubeflow_public_repos/pipelines/components/kubeflow/katib-launcher/src/launch_experiment.py | # Copyright 2020 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
from distutils.util import strtobool
import json
import os
import logging
import time
from kubernetes.client import V1ObjectMeta
from kubeflow.katib import KatibClient
from kubeflow.katib import ApiClient
from kubeflow.katib import V1beta1Experiment
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)
FINISH_CONDITIONS = ["Succeeded", "Failed"]
class JSONObject(object):
""" This class is needed to deserialize input JSON.
Katib API client expects JSON under .data attribute.
"""
def __init__(self, json):
self.data = json
def wait_experiment_finish(katib_client, experiment, timeout):
polling_interval = datetime.timedelta(seconds=30)
end_time = datetime.datetime.now() + datetime.timedelta(minutes=timeout)
experiment_name = experiment.metadata.name
experiment_namespace = experiment.metadata.namespace
while True:
current_status = None
try:
current_status = katib_client.get_experiment_status(name=experiment_name, namespace=experiment_namespace)
except Exception as e:
logger.info("Unable to get current status for the Experiment: {} in namespace: {}. Exception: {}".format(
experiment_name, experiment_namespace, e))
        # If the Experiment has reached a finish condition, exit the loop.
if current_status in FINISH_CONDITIONS:
logger.info("Experiment: {} in namespace: {} has reached the end condition: {}".format(
experiment_name, experiment_namespace, current_status))
return
# Print the current condition.
logger.info("Current condition for Experiment: {} in namespace: {} is: {}".format(
experiment_name, experiment_namespace, current_status))
        # If the timeout has been reached, raise an exception.
        if datetime.datetime.now() > end_time:
            raise Exception("Timeout waiting for Experiment: {} in namespace: {} "
"to reach one of these conditions: {}".format(
experiment_name, experiment_namespace, FINISH_CONDITIONS))
# Sleep for poll interval.
time.sleep(polling_interval.seconds)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Katib Experiment launcher')
parser.add_argument('--experiment-name', type=str,
help='Experiment name')
parser.add_argument('--experiment-namespace', type=str, default='anonymous',
help='Experiment namespace')
parser.add_argument('--experiment-spec', type=str, default='',
help='Experiment specification')
parser.add_argument('--experiment-timeout-minutes', type=int, default=60*24,
help='Time in minutes to wait for the Experiment to complete')
parser.add_argument('--delete-after-done', type=strtobool, default=True,
help='Whether to delete the Experiment after it is finished')
parser.add_argument('--output-file', type=str, default='/output.txt',
help='The file which stores the best hyperparameters of the Experiment')
args = parser.parse_args()
experiment_name = args.experiment_name
experiment_namespace = args.experiment_namespace
logger.info("Creating Experiment: {} in namespace: {}".format(experiment_name, experiment_namespace))
# Create JSON object from experiment spec
experiment_spec = JSONObject(args.experiment_spec)
# Deserialize JSON to ExperimentSpec
experiment_spec = ApiClient().deserialize(experiment_spec, "V1beta1ExperimentSpec")
# Create Experiment object.
experiment = V1beta1Experiment(
api_version="kubeflow.org/v1beta1",
kind="Experiment",
metadata=V1ObjectMeta(
name=experiment_name,
namespace=experiment_namespace
),
spec=experiment_spec
)
# Create Katib client.
katib_client = KatibClient()
# Create Experiment in Kubernetes cluster.
output = katib_client.create_experiment(experiment, namespace=experiment_namespace)
# Wait until Experiment is created.
end_time = datetime.datetime.now() + datetime.timedelta(minutes=args.experiment_timeout_minutes)
while True:
current_status = None
# Try to get Experiment status.
try:
current_status = katib_client.get_experiment_status(name=experiment_name, namespace=experiment_namespace)
except Exception:
logger.info("Waiting until Experiment is created...")
# If current status is set, exit the loop.
if current_status is not None:
break
        # If the timeout has been reached, raise an exception.
        if datetime.datetime.now() > end_time:
            raise Exception("Timeout waiting for Experiment: {} in namespace: {} to be created".format(
experiment_name, experiment_namespace))
time.sleep(1)
logger.info("Experiment is created")
# Wait for Experiment finish.
wait_experiment_finish(katib_client, experiment, args.experiment_timeout_minutes)
# Check if Experiment is successful.
if katib_client.is_experiment_succeeded(name=experiment_name, namespace=experiment_namespace):
logger.info("Experiment: {} in namespace: {} is successful".format(
experiment_name, experiment_namespace))
optimal_hp = katib_client.get_optimal_hyperparameters(
name=experiment_name, namespace=experiment_namespace)
logger.info("Optimal hyperparameters:\n{}".format(optimal_hp))
# Create dir if it doesn't exist.
if not os.path.exists(os.path.dirname(args.output_file)):
os.makedirs(os.path.dirname(args.output_file))
# Save HyperParameters to the file.
with open(args.output_file, 'w') as f:
f.write(json.dumps(optimal_hp))
else:
logger.info("Experiment: {} in namespace: {} is failed".format(
experiment_name, experiment_namespace))
# Print Experiment if it is failed.
experiment = katib_client.get_experiment(name=experiment_name, namespace=experiment_namespace)
logger.info(experiment)
# Delete Experiment if it is needed.
if args.delete_after_done:
katib_client.delete_experiment(name=experiment_name, namespace=experiment_namespace)
logger.info("Experiment: {} in namespace: {} has been deleted".format(
experiment_name, experiment_namespace))
| 530 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/build_image.sh | #!/bin/bash -e
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
while getopts ":hp:t:i:b:l:" opt; do
case "${opt}" in
h) echo "-p: project name"
echo "-t: tag name"
echo "-i: image name. If provided, project name and tag name are not necessary"
echo "-b: tensorflow base image tag. Optional. The value can be tags listed under \
https://hub.docker.com/r/tensorflow/tensorflow/tags. Defaults to '2.3.0'."
echo "-l: local image name. Optional. Defaults to 'ml-pipeline-kubeflow-tf-trainer'"
exit
;;
p) PROJECT_ID=${OPTARG}
;;
t) TAG_NAME=${OPTARG}
;;
i) IMAGE_NAME=${OPTARG}
;;
b) TF_BASE_TAG=${OPTARG}
;;
l) LOCAL_IMAGE_NAME=${OPTARG}
;;
\? ) echo "Usage: cmd [-p] project [-t] tag [-i] image [-b] base image tag [l] local image"
exit
;;
esac
done
set -x
if [ -z "${LOCAL_IMAGE_NAME}" ]; then
LOCAL_IMAGE_NAME=ml-pipeline-kubeflow-tf-trainer
fi
if [ -z "${PROJECT_ID}" ]; then
PROJECT_ID=$(gcloud config config-helper --format "value(configuration.properties.core.project)")
fi
if [ -z "${TAG_NAME}" ]; then
TAG_NAME=$(date +v%Y%m%d)-$(git describe --tags --always --dirty)-$(git diff | shasum -a256 | cut -c -6)
fi
if [ -z "${TF_BASE_TAG}" ]; then
TF_BASE_TAG=2.3.0
fi
mkdir -p ./build
cp -R ./src/ ./build/
docker build --build-arg TF_TAG=${TF_BASE_TAG} -t ${LOCAL_IMAGE_NAME} .
if [ -z "${IMAGE_NAME}" ]; then
docker tag ${LOCAL_IMAGE_NAME} gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:${TAG_NAME}
docker push gcr.io/${PROJECT_ID}/${LOCAL_IMAGE_NAME}:${TAG_NAME}
else
docker tag ${LOCAL_IMAGE_NAME} "${IMAGE_NAME}"
docker push "${IMAGE_NAME}"
fi
rm -rf ./build
| 531 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/requirements.txt | pyyaml==3.12
six==1.11.0
tensorflow-transform==0.23.0
tensorflow-model-analysis==0.23.0
| 532 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/component.yaml | name: Train FC DNN using TF
description: Trains fully-connected neural network using Tensorflow
inputs:
- {name: Transformed data dir, type: GCSPath, description: 'GCS path containing tf-transformed training and eval data.'} # type: {GCSPath: {path_type: Directory}}
- {name: Schema, type: GCSPath, description: 'GCS json schema file path.'} # type: {GCSPath: {data_type: JSON}}
- {name: Learning rate, type: Float, default: '0.1', description: 'Learning rate for training.'}
- {name: Optimizer, type: String, default: 'Adagrad', description: 'Optimizer for training. Valid values are: Adam, SGD, Adagrad. If not provided, tf.estimator default will be used.'}
- {name: Hidden layer size, type: String, default: '100', description: 'Comma-separated hidden layer sizes. For example "200,100,50".'}
- {name: Steps, type: Integer, description: 'Maximum number of training steps to perform. If unspecified, will honor epochs.'}
#- {name: Epochs, type: Integer, default: '', description: 'Maximum number of training data epochs on which to train. If both "steps" and "epochs" are specified, the training job will run for "steps" or "epochs", whichever occurs first.'}
- {name: Target, type: String, description: 'Name of the column for prediction target.'}
- {name: Preprocessing module, type: GCSPath, default: '', description: 'GCS path to a python file defining "preprocess" and "get_feature_columns" functions.'} # type: {GCSPath: {data_type: Python}}
- {name: Training output dir, type: GCSPath, description: 'GCS or local directory.'} # type: {GCSPath: {path_type: Directory}}
outputs:
- {name: Training output dir, type: GCSPath, description: 'GCS or local directory.'} # type: {GCSPath: {path_type: Directory}}
- {name: MLPipeline UI metadata, type: UI metadata}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:1.8.0-alpha.0
command: [python2, -m, trainer.task]
args: [
--transformed-data-dir, {inputValue: Transformed data dir},
--schema, {inputValue: Schema},
--learning-rate, {inputValue: Learning rate},
--optimizer, {inputValue: Optimizer},
--hidden-layer-size, {inputValue: Hidden layer size},
--steps, {inputValue: Steps},
# --epochs, {inputValue: Epochs},
--target, {inputValue: Target},
--preprocessing-module, {inputValue: Preprocessing module},
--job-dir, {inputValue: Training output dir},
--exported-model-dir-uri-output-path, {outputPath: Training output dir},
--ui-metadata-output-path, {outputPath: MLPipeline UI metadata},
]
| 533 |
0 | kubeflow_public_repos/pipelines/components/kubeflow | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/Dockerfile | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG TF_TAG
FROM tensorflow/tensorflow:$TF_TAG
COPY requirements.txt .
RUN python3 -m pip install -r \
requirements.txt --quiet --no-cache-dir \
&& rm -f requirements.txt
ADD build /ml
WORKDIR /ml
ENTRYPOINT ["python", "-m", "trainer.task"]
| 534 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/src/__init__.py | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 535 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/src/setup.py | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
name='trainer',
version='1.0.0',
packages=find_packages(),
description='DNN Trainer',
author='Google',
keywords=[
],
license="Apache Software License",
long_description="""
""",
install_requires=[
'tensorflow==1.15.4',
],
package_data={
},
data_files=[],
)
| 536 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/src | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/src/trainer/__init__.py | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 537 |
0 | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/src | kubeflow_public_repos/pipelines/components/kubeflow/dnntrainer/src/trainer/task.py | # Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from pathlib import Path
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_model_analysis as tfma
from tensorflow.python.lib.io import file_io
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from tensorflow_transform.saved import input_fn_maker
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import metadata_io
IMAGE_EMBEDDING_SIZE = 2048
CLASSIFICATION_TARGET_TYPES = [tf.bool, tf.int32, tf.int64]
REGRESSION_TARGET_TYPES = [tf.float32, tf.float64]
TARGET_TYPES = CLASSIFICATION_TARGET_TYPES + REGRESSION_TARGET_TYPES
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--job-dir',
type=str,
required=True,
help='GCS or local directory.')
parser.add_argument('--transformed-data-dir',
type=str,
required=True,
help='GCS path containing tf-transformed training and eval data.')
parser.add_argument('--schema',
type=str,
required=True,
help='GCS json schema file path.')
parser.add_argument('--target',
type=str,
required=True,
help='The name of the column to predict in training data.')
parser.add_argument('--learning-rate',
type=float,
default=0.1,
help='Learning rate for training.')
parser.add_argument('--optimizer',
choices=['Adam', 'SGD', 'Adagrad'],
default='Adagrad',
help='Optimizer for training. If not provided, '
'tf.estimator default will be used.')
parser.add_argument('--hidden-layer-size',
type=str,
default='100',
help='comma separated hidden layer sizes. For example "200,100,50".')
parser.add_argument('--steps',
type=int,
help='Maximum number of training steps to perform. If unspecified, will '
'honor epochs.')
parser.add_argument('--epochs',
type=int,
help='Maximum number of training data epochs on which to train. If '
'both "steps" and "epochs" are specified, the training '
'job will run for "steps" or "epochs", whichever occurs first.')
parser.add_argument('--preprocessing-module',
type=str,
required=False,
help=('GCS path to a python file defining '
'"preprocess" and "get_feature_columns" functions.'))
parser.add_argument('--exported-model-dir-uri-output-path',
type=str,
default='/output.txt',
help='Local output path for the file containing exported model directory URI.')
parser.add_argument('--ui-metadata-output-path',
type=str,
default='/mlpipeline-ui-metadata.json',
help='Local output path for the file containing UI metadata JSON structure.')
args = parser.parse_args()
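  # Convert the comma-separated hidden layer sizes (e.g. "200,100,50") into a list of ints.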
args.hidden_layer_size = [int(x.strip()) for x in args.hidden_layer_size.split(',')]
return args
def is_classification(transformed_data_dir, target):
"""Whether the scenario is classification (vs regression).
Returns:
The number of classes if the target represents a classification
problem, or None if it does not.
"""
transformed_metadata = metadata_io.read_metadata(
os.path.join(transformed_data_dir, transform_fn_io.TRANSFORMED_METADATA_DIR))
transformed_feature_spec = transformed_metadata.schema.as_feature_spec()
if target not in transformed_feature_spec:
raise ValueError('Cannot find target "%s" in transformed data.' % target)
feature = transformed_feature_spec[target]
if (not isinstance(feature, tf.FixedLenFeature) or feature.shape != [] or
feature.dtype not in TARGET_TYPES):
raise ValueError('target "%s" is of invalid type.' % target)
if feature.dtype in CLASSIFICATION_TARGET_TYPES:
if feature.dtype == tf.bool:
return 2
return get_vocab_size(transformed_data_dir, target)
return None
def make_tft_input_metadata(schema):
"""Create tf-transform metadata from given schema."""
tft_schema = {}
for col_schema in schema:
col_type = col_schema['type']
col_name = col_schema['name']
if col_type == 'NUMBER':
tft_schema[col_name] = dataset_schema.ColumnSchema(
tf.float32, [], dataset_schema.FixedColumnRepresentation(default_value=0.0))
elif col_type in ['CATEGORY', 'TEXT', 'IMAGE_URL', 'KEY']:
tft_schema[col_name] = dataset_schema.ColumnSchema(
tf.string, [], dataset_schema.FixedColumnRepresentation(default_value=''))
return dataset_metadata.DatasetMetadata(dataset_schema.Schema(tft_schema))
def make_training_input_fn(transformed_data_dir, mode, batch_size, target_name, num_epochs=None):
"""Creates an input function reading from transformed data.
Args:
transformed_data_dir: Directory to read transformed data and metadata from.
mode: 'train' or 'eval'.
batch_size: Batch size.
target_name: name of the target column.
num_epochs: number of training data epochs.
Returns:
The input function for training or eval.
"""
transformed_metadata = metadata_io.read_metadata(
os.path.join(transformed_data_dir, transform_fn_io.TRANSFORMED_METADATA_DIR))
transformed_feature_spec = transformed_metadata.schema.as_feature_spec()
def _input_fn():
"""Input function for training and eval."""
epochs = 1 if mode == 'eval' else num_epochs
transformed_features = tf.contrib.learn.io.read_batch_features(
os.path.join(transformed_data_dir, mode + '-*'),
batch_size, transformed_feature_spec, tf.TFRecordReader, num_epochs=epochs)
# Extract features and label from the transformed tensors.
transformed_labels = transformed_features.pop(target_name)
return transformed_features, transformed_labels
return _input_fn
def make_serving_input_fn(transformed_data_dir, schema, target_name):
"""Creates an input function reading from transformed data.
Args:
transformed_data_dir: Directory to read transformed data and metadata from.
schema: the raw data schema.
target_name: name of the target column.
Returns:
The input function for serving.
"""
raw_metadata = make_tft_input_metadata(schema)
raw_feature_spec = raw_metadata.schema.as_feature_spec()
raw_keys = [x['name'] for x in schema]
raw_keys.remove(target_name)
serving_input_fn = input_fn_maker.build_csv_transforming_serving_input_receiver_fn(
raw_metadata=raw_metadata,
transform_savedmodel_dir=transformed_data_dir + '/transform_fn',
raw_keys=raw_keys)
return serving_input_fn
def get_vocab_size(transformed_data_dir, feature_name):
"""Get vocab size of a given text or category column."""
vocab_file = os.path.join(transformed_data_dir,
transform_fn_io.TRANSFORM_FN_DIR,
'assets',
'vocab_' + feature_name)
with file_io.FileIO(vocab_file, 'r') as f:
return sum(1 for _ in f)
def build_feature_columns(schema, transformed_data_dir, target):
"""Build feature columns that tf.estimator expects."""
feature_columns = []
for entry in schema:
name = entry['name']
datatype = entry['type']
if name == target:
continue
if datatype == 'NUMBER':
feature_columns.append(tf.feature_column.numeric_column(name, shape=()))
elif datatype == 'IMAGE_URL':
feature_columns.append(tf.feature_column.numeric_column(name, shape=(2048)))
elif datatype == 'CATEGORY':
vocab_size = get_vocab_size(transformed_data_dir, name)
category_column = tf.feature_column.categorical_column_with_identity(name, num_buckets=vocab_size)
indicator_column = tf.feature_column.indicator_column(category_column)
feature_columns.append(indicator_column)
elif datatype == 'TEXT':
vocab_size = get_vocab_size(transformed_data_dir, name)
indices_column = tf.feature_column.categorical_column_with_identity(name + '_indices', num_buckets=vocab_size + 1)
weighted_column = tf.feature_column.weighted_categorical_column(indices_column, name + '_weights')
indicator_column = tf.feature_column.indicator_column(weighted_column)
feature_columns.append(indicator_column)
return feature_columns
def get_estimator(schema, transformed_data_dir, target_name, output_dir, hidden_units,
optimizer, learning_rate, feature_columns):
"""Get proper tf.estimator (DNNClassifier or DNNRegressor)."""
  # Map the optimizer name to a tf.train optimizer instance; default to Adagrad.
  if optimizer == 'Adam':
    optimizer = tf.train.AdamOptimizer(learning_rate)
  elif optimizer == 'SGD':
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
  else:
    optimizer = tf.train.AdagradOptimizer(learning_rate)
# Set how often to run checkpointing in terms of steps.
config = tf.contrib.learn.RunConfig(save_checkpoints_steps=1000)
n_classes = is_classification(transformed_data_dir, target_name)
if n_classes:
estimator = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
hidden_units=hidden_units,
n_classes=n_classes,
config=config,
model_dir=output_dir)
else:
estimator = tf.estimator.DNNRegressor(
feature_columns=feature_columns,
hidden_units=hidden_units,
config=config,
model_dir=output_dir,
optimizer=optimizer)
return estimator
def eval_input_receiver_fn(tf_transform_dir, schema, target):
"""Build everything needed for the tf-model-analysis to run the model.
Args:
tf_transform_dir: directory in which the tf-transform model was written
during the preprocessing step.
schema: the raw data schema.
target: name of the target column.
Returns:
EvalInputReceiver function, which contains:
- Tensorflow graph which parses raw untranformed features, applies the
tf-transform preprocessing operators.
- Set of raw, untransformed features.
- Label against which predictions will be compared.
"""
raw_metadata = make_tft_input_metadata(schema)
raw_feature_spec = raw_metadata.schema.as_feature_spec()
serialized_tf_example = tf.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
features = tf.parse_example(serialized_tf_example, raw_feature_spec)
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform(
os.path.join(tf_transform_dir, transform_fn_io.TRANSFORM_FN_DIR),
features))
receiver_tensors = {'examples': serialized_tf_example}
return tfma.export.EvalInputReceiver(
features=transformed_features,
receiver_tensors=receiver_tensors,
labels=transformed_features[target])
def main():
  # Configure TF_CONFIG so that TensorFlow recognizes the MASTER in the yaml file as the chief.
  # TODO: Kubeflow is working on fixing the problem, and this TF_CONFIG can be
  # removed once that lands.
args = parse_arguments()
tf.logging.set_verbosity(tf.logging.INFO)
schema = json.loads(file_io.read_file_to_string(args.schema))
feature_columns = None
if args.preprocessing_module:
module_dir = os.path.abspath(os.path.dirname(__file__))
preprocessing_module_path = os.path.join(module_dir, 'preprocessing.py')
with open(preprocessing_module_path, 'w+') as preprocessing_file:
preprocessing_file.write(
file_io.read_file_to_string(args.preprocessing_module))
import preprocessing
feature_columns = preprocessing.get_feature_columns(args.transformed_data_dir)
else:
feature_columns = build_feature_columns(schema, args.transformed_data_dir, args.target)
estimator = get_estimator(schema, args.transformed_data_dir, args.target, args.job_dir,
args.hidden_layer_size, args.optimizer, args.learning_rate,
feature_columns)
# TODO: Expose batch size.
train_input_fn = make_training_input_fn(
args.transformed_data_dir,
'train',
32,
args.target,
num_epochs=args.epochs)
eval_input_fn = make_training_input_fn(
args.transformed_data_dir,
'eval',
32,
args.target)
serving_input_fn = make_serving_input_fn(
args.transformed_data_dir,
schema,
args.target)
exporter = tf.estimator.FinalExporter('export', serving_input_fn)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=args.steps)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, exporters=[exporter])
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
eval_model_dir = os.path.join(args.job_dir, 'tfma_eval_model_dir')
tfma.export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=eval_model_dir,
eval_input_receiver_fn=(
lambda: eval_input_receiver_fn(
args.transformed_data_dir, schema, args.target)))
metadata = {
'outputs' : [{
'type': 'tensorboard',
'source': args.job_dir,
}]
}
Path(args.ui_metadata_output_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.ui_metadata_output_path).write_text(json.dumps(metadata))
Path(args.exported_model_dir_uri_output_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.exported_model_dir_uri_output_path).write_text(args.job_dir)
if __name__ == '__main__':
main()
| 538 |
0 | kubeflow_public_repos/pipelines/components/contrib/presto | kubeflow_public_repos/pipelines/components/contrib/presto/query/requirements.txt | pyhive[presto]
| 539 |
0 | kubeflow_public_repos/pipelines/components/contrib/presto | kubeflow_public_repos/pipelines/components/contrib/presto/query/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Presto Query
description: |
A Kubeflow Pipeline component to submit a query to Presto.
inputs:
- name: host
type: String
description: 'Presto Host.'
- name: catalog
type: String
description: 'The name of the catalog.'
- name: schema
type: String
description: 'The name of the schema.'
- name: query
type: String
description: 'The SQL query statements to be executed in Presto'
- name: user
type: String
description: 'The user of the Presto.'
- name: pwd
type: String
description: 'The password of the Presto.'
- name: output
description: 'The path or name of the emitted output.'
outputs:
- name: output
description: 'The path or name of the emitted output.'
implementation:
container:
image: docker.io/mkavi/kubeflow-pipeline-presto:latest
command: [
python3, /pipelines/component/src/program.py,
--host, {inputValue: host},
--catalog, {inputValue: catalog},
--schema, {inputValue: schema},
--query, {inputValue: query},
--user, {inputValue: user},
--pwd, {inputValue: pwd},
--output, {inputValue: output}
]
fileOutputs:
output: /output.txt
| 540 |
0 | kubeflow_public_repos/pipelines/components/contrib/presto | kubeflow_public_repos/pipelines/components/contrib/presto/query/Dockerfile | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM python:3.7
COPY requirements.txt .
RUN python3 -m pip install -r \
requirements.txt --quiet --no-cache-dir \
&& rm -f requirements.txt
COPY ./src /pipelines/component/src
| 541 |
0 | kubeflow_public_repos/pipelines/components/contrib/presto/query | kubeflow_public_repos/pipelines/components/contrib/presto/query/src/program.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pyhive import presto
def get_conn(host=None, catalog=None, schema=None, user=None, pwd=None):
conn = presto.connect(
host=host,
port=443,
protocol="https",
catalog=catalog,
schema=schema,
username=user,
password=pwd,
)
return conn
def query(conn, query):
cursor = conn.cursor()
cursor.execute(query)
cursor.fetchall()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, help="Presto Host.")
parser.add_argument(
"--catalog", type=str, required=True, help="The name of the catalog."
)
parser.add_argument(
"--schema", type=str, required=True, help="The name of the schema."
)
parser.add_argument(
"--query",
type=str,
required=True,
help="The SQL query statements to be executed in Presto.",
)
parser.add_argument(
"--user", type=str, required=True, help="The user of the Presto."
)
parser.add_argument(
"--pwd", type=str, required=True, help="The password of the Presto."
)
parser.add_argument(
"--output",
type=str,
required=True,
help="The path or name of the emitted output.",
)
args = parser.parse_args()
conn = get_conn(args.host, args.catalog, args.schema, args.user, args.pwd)
query(conn, args.query)
with open("/output.txt", "w+") as w:
w.write(args.output)
if __name__ == "__main__":
main()
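# Illustrative invocation (all values are placeholders):
#   python3 program.py --host presto.example.com --catalog hive --schema default \
#     --query "SELECT 1" --user alice --pwd secret --output done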
| 542 |
0 | kubeflow_public_repos/pipelines/components/contrib/git | kubeflow_public_repos/pipelines/components/contrib/git/clone/component.yaml | name: Git clone
description: Creates a shallow clone of the specified repo branch
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/git/clone/component.yaml'
volatile_component: "true"
inputs:
- {name: Repo URI, type: URI}
- {name: Branch, type: String, default: master}
outputs:
- {name: Repo dir, type: Directory}
implementation:
container:
image: alpine/git
command:
- git
- clone
- --depth=1
- --branch
- inputValue: Branch
- inputValue: Repo URI
- outputPath: Repo dir
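# Illustrative pipeline usage (a sketch; the repo URL and branch are placeholders):
#   git_clone_op = kfp.components.load_component_from_file('component.yaml')
#   clone_task = git_clone_op(repo_uri='https://github.com/kubeflow/pipelines.git', branch='master')
# The 'Repo dir' output of clone_task then carries the cloned repository artifact.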
| 543 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def xgboost_train(
training_data_path: InputPath('CSV'), # Also supports LibSVM
model_path: OutputPath('XGBoostModel'),
model_config_path: OutputPath('XGBoostModelConfig'),
starting_model_path: InputPath('XGBoostModel') = None,
label_column: int = 0,
num_iterations: int = 10,
booster_params: dict = None,
# Booster parameters
objective: str = 'reg:squarederror',
booster: str = 'gbtree',
learning_rate: float = 0.3,
min_split_loss: float = 0,
max_depth: int = 6,
):
'''Train an XGBoost model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary XGBoost format.
model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
num_boost_rounds: Number of boosting iterations.
booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html
objective: The learning task and the corresponding learning objective.
See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters
The most common values are:
"reg:squarederror" - Regression with squared loss (default).
"reg:logistic" - Logistic regression.
"binary:logistic" - Logistic regression for binary classification, output probability.
"binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation
"rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
"rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized
Annotations:
author: Alexey Volkov <[email protected]>
'''
import pandas
import xgboost
df = pandas.read_csv(
training_data_path,
)
training_data = xgboost.DMatrix(
data=df.drop(columns=[df.columns[label_column]]),
label=df[df.columns[label_column]],
)
booster_params = booster_params or {}
booster_params.setdefault('objective', objective)
booster_params.setdefault('booster', booster)
booster_params.setdefault('learning_rate', learning_rate)
booster_params.setdefault('min_split_loss', min_split_loss)
booster_params.setdefault('max_depth', max_depth)
starting_model = None
if starting_model_path:
starting_model = xgboost.Booster(model_file=starting_model_path)
model = xgboost.train(
params=booster_params,
dtrain=training_data,
num_boost_round=num_iterations,
xgb_model=starting_model
)
# Saving the model in binary format
model.save_model(model_path)
model_config_str = model.save_config()
with open(model_config_path, 'w') as model_config_file:
model_config_file.write(model_config_str)
if __name__ == '__main__':
create_component_from_func(
xgboost_train,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=[
'xgboost==1.1.1',
'pandas==1.0.5',
],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Train/component.yaml",
},
)
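# Illustrative use of the generated component in a pipeline (a sketch; the upstream
# CSV-producing task and pipeline name are assumptions, not part of this file):
#   import kfp
#   xgboost_train_op = kfp.components.load_component_from_file('component.yaml')
#   @kfp.dsl.pipeline(name='xgboost-train-demo')
#   def demo_pipeline():
#       train_task = xgboost_train_op(
#           training_data=some_csv_producing_task.outputs['output'],
#           label_column=0,
#           num_iterations=10,
#       )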
| 544 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train/component.yaml | name: Xgboost train
description: |-
Train an XGBoost model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary XGBoost format.
model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
num_boost_rounds: Number of boosting iterations.
booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html
objective: The learning task and the corresponding learning objective.
See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters
The most common values are:
"reg:squarederror" - Regression with squared loss (default).
"reg:logistic" - Logistic regression.
"binary:logistic" - Logistic regression for binary classification, output probability.
"binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation
"rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
"rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: training_data, type: CSV}
- {name: starting_model, type: XGBoostModel, optional: true}
- {name: label_column, type: Integer, default: '0', optional: true}
- {name: num_iterations, type: Integer, default: '10', optional: true}
- {name: booster_params, type: JsonObject, optional: true}
- {name: objective, type: String, default: 'reg:squarederror', optional: true}
- {name: booster, type: String, default: gbtree, optional: true}
- {name: learning_rate, type: Float, default: '0.3', optional: true}
- {name: min_split_loss, type: Float, default: '0', optional: true}
- {name: max_depth, type: Integer, default: '6', optional: true}
outputs:
- {name: model, type: XGBoostModel}
- {name: model_config, type: XGBoostModelConfig}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Train/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'xgboost==1.1.1' 'pandas==1.0.5' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3
-m pip install --quiet --no-warn-script-location 'xgboost==1.1.1' 'pandas==1.0.5'
--user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def xgboost_train(
training_data_path, # Also supports LibSVM
model_path,
model_config_path,
starting_model_path = None,
label_column = 0,
num_iterations = 10,
booster_params = None,
# Booster parameters
objective = 'reg:squarederror',
booster = 'gbtree',
learning_rate = 0.3,
min_split_loss = 0,
max_depth = 6,
):
'''Train an XGBoost model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary XGBoost format.
model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
num_boost_rounds: Number of boosting iterations.
booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html
objective: The learning task and the corresponding learning objective.
See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters
The most common values are:
"reg:squarederror" - Regression with squared loss (default).
"reg:logistic" - Logistic regression.
"binary:logistic" - Logistic regression for binary classification, output probability.
"binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation
"rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
"rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized
Annotations:
author: Alexey Volkov <[email protected]>
'''
import pandas
import xgboost
df = pandas.read_csv(
training_data_path,
)
training_data = xgboost.DMatrix(
data=df.drop(columns=[df.columns[label_column]]),
label=df[df.columns[label_column]],
)
booster_params = booster_params or {}
booster_params.setdefault('objective', objective)
booster_params.setdefault('booster', booster)
booster_params.setdefault('learning_rate', learning_rate)
booster_params.setdefault('min_split_loss', min_split_loss)
booster_params.setdefault('max_depth', max_depth)
starting_model = None
if starting_model_path:
starting_model = xgboost.Booster(model_file=starting_model_path)
model = xgboost.train(
params=booster_params,
dtrain=training_data,
num_boost_round=num_iterations,
xgb_model=starting_model
)
# Saving the model in binary format
model.save_model(model_path)
model_config_str = model.save_config()
with open(model_config_path, 'w') as model_config_file:
model_config_file.write(model_config_str)
import json
import argparse
_parser = argparse.ArgumentParser(prog='Xgboost train', description='Train an XGBoost model.\n\n Args:\n training_data_path: Path for the training data in CSV format.\n model_path: Output path for the trained model in binary XGBoost format.\n model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.\n starting_model_path: Path for the existing trained model to start from.\n label_column: Column containing the label data.\n num_boost_rounds: Number of boosting iterations.\n booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html\n objective: The learning task and the corresponding learning objective.\n See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters\n The most common values are:\n "reg:squarederror" - Regression with squared loss (default).\n "reg:logistic" - Logistic regression.\n "binary:logistic" - Logistic regression for binary classification, output probability.\n "binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation\n "rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized\n "rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--training-data", dest="training_data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--starting-model", dest="starting_model_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--label-column", dest="label_column", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--num-iterations", dest="num_iterations", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--booster-params", dest="booster_params", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--objective", dest="objective", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--booster", dest="booster", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--learning-rate", dest="learning_rate", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--min-split-loss", dest="min_split_loss", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--max-depth", dest="max_depth", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model-config", dest="model_config_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = xgboost_train(**_parsed_args)
args:
- --training-data
- {inputPath: training_data}
- if:
cond: {isPresent: starting_model}
then:
- --starting-model
- {inputPath: starting_model}
- if:
cond: {isPresent: label_column}
then:
- --label-column
- {inputValue: label_column}
- if:
cond: {isPresent: num_iterations}
then:
- --num-iterations
- {inputValue: num_iterations}
- if:
cond: {isPresent: booster_params}
then:
- --booster-params
- {inputValue: booster_params}
- if:
cond: {isPresent: objective}
then:
- --objective
- {inputValue: objective}
- if:
cond: {isPresent: booster}
then:
- --booster
- {inputValue: booster}
- if:
cond: {isPresent: learning_rate}
then:
- --learning-rate
- {inputValue: learning_rate}
- if:
cond: {isPresent: min_split_loss}
then:
- --min-split-loss
- {inputValue: min_split_loss}
- if:
cond: {isPresent: max_depth}
then:
- --max-depth
- {inputValue: max_depth}
- --model
- {outputPath: model}
- --model-config
- {outputPath: model_config}
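# ---------------------------------------------------------------------------
# Usage sketch (comment only; not part of the generated component spec).
# This spec is normally consumed from the kfp SDK, for example:
#
#   from kfp import components
#   xgboost_train_op = components.load_component_from_url(
#       'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
#
# The pinned-commit URL above is the same one referenced by the
# cross-validation components elsewhere in this directory tree; loading a
# local copy with components.load_component_from_file('component.yaml') is an
# equivalent alternative.
# ---------------------------------------------------------------------------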
| 545 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train/from_ApacheParquet/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def xgboost_train(
training_data_path: InputPath('ApacheParquet'),
model_path: OutputPath('XGBoostModel'),
model_config_path: OutputPath('XGBoostModelConfig'),
label_column_name: str,
starting_model_path: InputPath('XGBoostModel') = None,
num_iterations: int = 10,
booster_params: dict = None,
# Booster parameters
objective: str = 'reg:squarederror',
booster: str = 'gbtree',
learning_rate: float = 0.3,
min_split_loss: float = 0,
max_depth: int = 6,
):
'''Train an XGBoost model.
Args:
training_data_path: Path for the training data in Apache Parquet format.
model_path: Output path for the trained model in binary XGBoost format.
model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.
starting_model_path: Path for the existing trained model to start from.
label_column_name: Name of the column containing the label data.
        num_iterations: Number of boosting iterations.
booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html
objective: The learning task and the corresponding learning objective.
See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters
The most common values are:
"reg:squarederror" - Regression with squared loss (default).
"reg:logistic" - Logistic regression.
"binary:logistic" - Logistic regression for binary classification, output probability.
"binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation
"rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
"rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized
Annotations:
author: Alexey Volkov <[email protected]>
'''
import pandas
import xgboost
# Loading data
df = pandas.read_parquet(training_data_path)
training_data = xgboost.DMatrix(
data=df.drop(columns=[label_column_name]),
label=df[[label_column_name]],
)
# Training
booster_params = booster_params or {}
booster_params.setdefault('objective', objective)
booster_params.setdefault('booster', booster)
booster_params.setdefault('learning_rate', learning_rate)
booster_params.setdefault('min_split_loss', min_split_loss)
booster_params.setdefault('max_depth', max_depth)
starting_model = None
if starting_model_path:
starting_model = xgboost.Booster(model_file=starting_model_path)
model = xgboost.train(
params=booster_params,
dtrain=training_data,
num_boost_round=num_iterations,
xgb_model=starting_model
)
# Saving the model in binary format
model.save_model(model_path)
model_config_str = model.save_config()
with open(model_config_path, 'w') as model_config_file:
model_config_file.write(model_config_str)
if __name__ == '__main__':
create_component_from_func(
xgboost_train,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=[
'xgboost==1.1.1',
'pandas==1.0.5',
'pyarrow==0.17.1',
],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Train/from_ApacheParquet/component.yaml",
},
)
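# A minimal local-execution sketch (illustration only; not part of the
# component). Because xgboost_train is a plain Python function, it can be
# smoke-tested outside of a pipeline. The file paths and the 'tips' label
# column below are hypothetical placeholders for a small Parquet dataset on
# local disk; xgboost, pandas and pyarrow must be installed locally.
def _example_local_run():  # sketch only; nothing in this module calls it
    xgboost_train(
        training_data_path='/tmp/training.parquet',  # assumed to exist locally
        model_path='/tmp/model.bst',                  # binary model is written here
        model_config_path='/tmp/model_config.json',   # Booster config JSON is written here
        label_column_name='tips',                     # hypothetical label column
        num_iterations=50,
    )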
| 546 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train/from_ApacheParquet/component.yaml | name: Xgboost train
description: |-
Train an XGBoost model.
Args:
training_data_path: Path for the training data in Apache Parquet format.
model_path: Output path for the trained model in binary XGBoost format.
model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.
starting_model_path: Path for the existing trained model to start from.
label_column_name: Name of the column containing the label data.
    num_iterations: Number of boosting iterations.
booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html
objective: The learning task and the corresponding learning objective.
See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters
The most common values are:
"reg:squarederror" - Regression with squared loss (default).
"reg:logistic" - Logistic regression.
"binary:logistic" - Logistic regression for binary classification, output probability.
"binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation
"rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
"rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: training_data, type: ApacheParquet}
- {name: label_column_name, type: String}
- {name: starting_model, type: XGBoostModel, optional: true}
- {name: num_iterations, type: Integer, default: '10', optional: true}
- {name: booster_params, type: JsonObject, optional: true}
- {name: objective, type: String, default: 'reg:squarederror', optional: true}
- {name: booster, type: String, default: gbtree, optional: true}
- {name: learning_rate, type: Float, default: '0.3', optional: true}
- {name: min_split_loss, type: Float, default: '0', optional: true}
- {name: max_depth, type: Integer, default: '6', optional: true}
outputs:
- {name: model, type: XGBoostModel}
- {name: model_config, type: XGBoostModelConfig}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Train/from_ApacheParquet/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'xgboost==1.1.1' 'pandas==1.0.5' 'pyarrow==0.17.1' || PIP_DISABLE_PIP_VERSION_CHECK=1
python3 -m pip install --quiet --no-warn-script-location 'xgboost==1.1.1' 'pandas==1.0.5'
'pyarrow==0.17.1' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def xgboost_train(
training_data_path,
model_path,
model_config_path,
label_column_name,
starting_model_path = None,
num_iterations = 10,
booster_params = None,
# Booster parameters
objective = 'reg:squarederror',
booster = 'gbtree',
learning_rate = 0.3,
min_split_loss = 0,
max_depth = 6,
):
'''Train an XGBoost model.
Args:
training_data_path: Path for the training data in Apache Parquet format.
model_path: Output path for the trained model in binary XGBoost format.
model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.
starting_model_path: Path for the existing trained model to start from.
label_column_name: Name of the column containing the label data.
        num_iterations: Number of boosting iterations.
booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html
objective: The learning task and the corresponding learning objective.
See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters
The most common values are:
"reg:squarederror" - Regression with squared loss (default).
"reg:logistic" - Logistic regression.
"binary:logistic" - Logistic regression for binary classification, output probability.
"binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation
"rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
"rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized
Annotations:
author: Alexey Volkov <[email protected]>
'''
import pandas
import xgboost
# Loading data
df = pandas.read_parquet(training_data_path)
training_data = xgboost.DMatrix(
data=df.drop(columns=[label_column_name]),
label=df[[label_column_name]],
)
# Training
booster_params = booster_params or {}
booster_params.setdefault('objective', objective)
booster_params.setdefault('booster', booster)
booster_params.setdefault('learning_rate', learning_rate)
booster_params.setdefault('min_split_loss', min_split_loss)
booster_params.setdefault('max_depth', max_depth)
starting_model = None
if starting_model_path:
starting_model = xgboost.Booster(model_file=starting_model_path)
model = xgboost.train(
params=booster_params,
dtrain=training_data,
num_boost_round=num_iterations,
xgb_model=starting_model
)
# Saving the model in binary format
model.save_model(model_path)
model_config_str = model.save_config()
with open(model_config_path, 'w') as model_config_file:
model_config_file.write(model_config_str)
import json
import argparse
_parser = argparse.ArgumentParser(prog='Xgboost train', description='Train an XGBoost model.\n\n Args:\n training_data_path: Path for the training data in Apache Parquet format.\n model_path: Output path for the trained model in binary XGBoost format.\n model_config_path: Output path for the internal parameter configuration of Booster as a JSON string.\n starting_model_path: Path for the existing trained model to start from.\n label_column_name: Name of the column containing the label data.\n num_boost_rounds: Number of boosting iterations.\n booster_params: Parameters for the booster. See https://xgboost.readthedocs.io/en/latest/parameter.html\n objective: The learning task and the corresponding learning objective.\n See https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters\n The most common values are:\n "reg:squarederror" - Regression with squared loss (default).\n "reg:logistic" - Logistic regression.\n "binary:logistic" - Logistic regression for binary classification, output probability.\n "binary:logitraw" - Logistic regression for binary classification, output score before logistic transformation\n "rank:pairwise" - Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized\n "rank:ndcg" - Use LambdaMART to perform list-wise ranking where Normalized Discounted Cumulative Gain (NDCG) is maximized\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--training-data", dest="training_data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--label-column-name", dest="label_column_name", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--starting-model", dest="starting_model_path", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--num-iterations", dest="num_iterations", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--booster-params", dest="booster_params", type=json.loads, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--objective", dest="objective", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--booster", dest="booster", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--learning-rate", dest="learning_rate", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--min-split-loss", dest="min_split_loss", type=float, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--max-depth", dest="max_depth", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model-config", dest="model_config_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = xgboost_train(**_parsed_args)
args:
- --training-data
- {inputPath: training_data}
- --label-column-name
- {inputValue: label_column_name}
- if:
cond: {isPresent: starting_model}
then:
- --starting-model
- {inputPath: starting_model}
- if:
cond: {isPresent: num_iterations}
then:
- --num-iterations
- {inputValue: num_iterations}
- if:
cond: {isPresent: booster_params}
then:
- --booster-params
- {inputValue: booster_params}
- if:
cond: {isPresent: objective}
then:
- --objective
- {inputValue: objective}
- if:
cond: {isPresent: booster}
then:
- --booster
- {inputValue: booster}
- if:
cond: {isPresent: learning_rate}
then:
- --learning-rate
- {inputValue: learning_rate}
- if:
cond: {isPresent: min_split_loss}
then:
- --min-split-loss
- {inputValue: min_split_loss}
- if:
cond: {isPresent: max_depth}
then:
- --max-depth
- {inputValue: max_depth}
- --model
- {outputPath: model}
- --model-config
- {outputPath: model_config}
| 547 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Predict/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def xgboost_predict(
data_path: InputPath('CSV'), # Also supports LibSVM
model_path: InputPath('XGBoostModel'),
predictions_path: OutputPath('Predictions'),
label_column: int = None,
):
'''Make predictions using a trained XGBoost model.
Args:
data_path: Path for the feature data in CSV format.
model_path: Path for the trained model in binary XGBoost format.
predictions_path: Output path for the predictions.
label_column: Column containing the label data.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
import numpy
import pandas
import xgboost
df = pandas.read_csv(
data_path,
)
if label_column is not None:
df = df.drop(columns=[df.columns[label_column]])
testing_data = xgboost.DMatrix(
data=df,
)
model = xgboost.Booster(model_file=model_path)
predictions = model.predict(testing_data)
Path(predictions_path).parent.mkdir(parents=True, exist_ok=True)
numpy.savetxt(predictions_path, predictions)
if __name__ == '__main__':
create_component_from_func(
xgboost_predict,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=[
'xgboost==1.1.1',
'pandas==1.0.5',
],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Predict/component.yaml",
},
)
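# A minimal local smoke-test sketch (illustration only; not part of the
# component). The paths below are hypothetical placeholders: a CSV feature
# file and a model previously saved by the matching XGBoost Train component.
def _example_local_run():  # sketch only; nothing in this module calls it
    xgboost_predict(
        data_path='/tmp/testing.csv',            # assumed to exist locally
        model_path='/tmp/model.bst',             # binary model produced by training
        predictions_path='/tmp/predictions.txt', # plain-text predictions are written here
        label_column=0,                          # drop column 0 if it holds labels
    )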
| 548 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Predict/component.yaml | name: Xgboost predict
description: |-
Make predictions using a trained XGBoost model.
Args:
data_path: Path for the feature data in CSV format.
model_path: Path for the trained model in binary XGBoost format.
predictions_path: Output path for the predictions.
label_column: Column containing the label data.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: CSV}
- {name: model, type: XGBoostModel}
- {name: label_column, type: Integer, optional: true}
outputs:
- {name: predictions, type: Predictions}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Predict/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'xgboost==1.1.1' 'pandas==1.0.5' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3
-m pip install --quiet --no-warn-script-location 'xgboost==1.1.1' 'pandas==1.0.5'
--user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def xgboost_predict(
data_path, # Also supports LibSVM
model_path,
predictions_path,
label_column = None,
):
'''Make predictions using a trained XGBoost model.
Args:
data_path: Path for the feature data in CSV format.
model_path: Path for the trained model in binary XGBoost format.
predictions_path: Output path for the predictions.
label_column: Column containing the label data.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
import numpy
import pandas
import xgboost
df = pandas.read_csv(
data_path,
)
if label_column is not None:
df = df.drop(columns=[df.columns[label_column]])
testing_data = xgboost.DMatrix(
data=df,
)
model = xgboost.Booster(model_file=model_path)
predictions = model.predict(testing_data)
Path(predictions_path).parent.mkdir(parents=True, exist_ok=True)
numpy.savetxt(predictions_path, predictions)
import argparse
_parser = argparse.ArgumentParser(prog='Xgboost predict', description='Make predictions using a trained XGBoost model.\n\n Args:\n data_path: Path for the feature data in CSV format.\n model_path: Path for the trained model in binary XGBoost format.\n predictions_path: Output path for the predictions.\n label_column: Column containing the label data.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--label-column", dest="label_column", type=int, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--predictions", dest="predictions_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = xgboost_predict(**_parsed_args)
args:
- --data
- {inputPath: data}
- --model
- {inputPath: model}
- if:
cond: {isPresent: label_column}
then:
- --label-column
- {inputValue: label_column}
- --predictions
- {outputPath: predictions}
| 549 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Predict | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Predict/from_ApacheParquet/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def xgboost_predict(
data_path: InputPath('ApacheParquet'),
model_path: InputPath('XGBoostModel'),
predictions_path: OutputPath('Predictions'),
label_column_name: str = None,
):
'''Make predictions using a trained XGBoost model.
Args:
data_path: Path for the feature data in Apache Parquet format.
model_path: Path for the trained model in binary XGBoost format.
predictions_path: Output path for the predictions.
label_column_name: Optional. Name of the column containing the label data that is excluded during the prediction.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
import numpy
import pandas
import xgboost
# Loading data
df = pandas.read_parquet(data_path)
if label_column_name:
df = df.drop(columns=[label_column_name])
evaluation_data = xgboost.DMatrix(
data=df,
)
    # Loading the model and making predictions
model = xgboost.Booster(model_file=model_path)
predictions = model.predict(evaluation_data)
Path(predictions_path).parent.mkdir(parents=True, exist_ok=True)
numpy.savetxt(predictions_path, predictions)
if __name__ == '__main__':
create_component_from_func(
xgboost_predict,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=[
'xgboost==1.1.1',
'pandas==1.0.5',
'pyarrow==0.17.1',
],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Predict/from_ApacheParquet/component.yaml",
},
)
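# A minimal local smoke-test sketch (illustration only; not part of the
# component). The paths and the 'tips' column name are hypothetical
# placeholders: a Parquet feature file plus a model saved by the matching
# training component; xgboost, pandas and pyarrow must be installed locally.
def _example_local_run():  # sketch only; nothing in this module calls it
    xgboost_predict(
        data_path='/tmp/testing.parquet',        # assumed to exist locally
        model_path='/tmp/model.bst',             # binary model produced by training
        predictions_path='/tmp/predictions.txt', # plain-text predictions are written here
        label_column_name='tips',                # dropped before prediction
    )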
| 550 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Predict | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Predict/from_ApacheParquet/component.yaml | name: Xgboost predict
description: |-
Make predictions using a trained XGBoost model.
Args:
data_path: Path for the feature data in Apache Parquet format.
model_path: Path for the trained model in binary XGBoost format.
predictions_path: Output path for the predictions.
label_column_name: Optional. Name of the column containing the label data that is excluded during the prediction.
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: ApacheParquet}
- {name: model, type: XGBoostModel}
- {name: label_column_name, type: String, optional: true}
outputs:
- {name: predictions, type: Predictions}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Predict/from_ApacheParquet/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'xgboost==1.1.1' 'pandas==1.0.5' 'pyarrow==0.17.1' || PIP_DISABLE_PIP_VERSION_CHECK=1
python3 -m pip install --quiet --no-warn-script-location 'xgboost==1.1.1' 'pandas==1.0.5'
'pyarrow==0.17.1' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def xgboost_predict(
data_path,
model_path,
predictions_path,
label_column_name = None,
):
'''Make predictions using a trained XGBoost model.
Args:
data_path: Path for the feature data in Apache Parquet format.
model_path: Path for the trained model in binary XGBoost format.
predictions_path: Output path for the predictions.
label_column_name: Optional. Name of the column containing the label data that is excluded during the prediction.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
import numpy
import pandas
import xgboost
# Loading data
df = pandas.read_parquet(data_path)
if label_column_name:
df = df.drop(columns=[label_column_name])
evaluation_data = xgboost.DMatrix(
data=df,
)
        # Loading the model and making predictions
model = xgboost.Booster(model_file=model_path)
predictions = model.predict(evaluation_data)
Path(predictions_path).parent.mkdir(parents=True, exist_ok=True)
numpy.savetxt(predictions_path, predictions)
import argparse
_parser = argparse.ArgumentParser(prog='Xgboost predict', description='Make predictions using a trained XGBoost model.\n\n Args:\n data_path: Path for the feature data in Apache Parquet format.\n model_path: Path for the trained model in binary XGBoost format.\n predictions_path: Output path for the predictions.\n label_column_name: Optional. Name of the column containing the label data that is excluded during the prediction.\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--label-column-name", dest="label_column_name", type=str, required=False, default=argparse.SUPPRESS)
_parser.add_argument("--predictions", dest="predictions_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = xgboost_predict(**_parsed_args)
args:
- --data
- {inputPath: data}
- --model
- {inputPath: model}
- if:
cond: {isPresent: label_column_name}
then:
- --label-column-name
- {inputValue: label_column_name}
- --predictions
- {outputPath: predictions}
| 551 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train_and_cross-validate_regression | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train_and_cross-validate_regression/from_CSV/component.py | from collections import OrderedDict
from kfp import components
xgboost_train_regression_and_calculate_metrics_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/18e8974288885086b2fd5351f6333210cd237d1b/components/XGBoost/Train_regression_and_calculate_metrics/from_CSV/component.yaml')
xgboost_5_fold_cross_validation_for_regression_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/271593e4193e2d3e44bdf42269fc03f0fcd2e5e8/components/XGBoost/Cross_validation_for_regression/from_CSV/component.yaml')
def xgboost_train_and_cv_regression_on_csv(
data: 'CSV',
label_column: int = 0,
objective: str = 'reg:squarederror',
num_iterations: int = 200,
):
main_training_and_metrics_task = xgboost_train_regression_and_calculate_metrics_on_csv_op(
training_data=data,
testing_data=data,
label_column=label_column,
objective=objective,
num_iterations=num_iterations,
)
cv_training_and_metrics_task = xgboost_5_fold_cross_validation_for_regression_op(
data=data,
label_column=label_column,
objective=objective,
num_iterations=num_iterations,
)
return OrderedDict([
('model', main_training_and_metrics_task.outputs['model']),
('training_mean_absolute_error', main_training_and_metrics_task.outputs['mean_absolute_error']),
('training_mean_squared_error', main_training_and_metrics_task.outputs['mean_squared_error']),
('training_root_mean_squared_error', main_training_and_metrics_task.outputs['root_mean_squared_error']),
('training_metrics', main_training_and_metrics_task.outputs['metrics']),
('cv_mean_absolute_error', cv_training_and_metrics_task.outputs['mean_absolute_error']),
('cv_mean_squared_error', cv_training_and_metrics_task.outputs['mean_squared_error']),
('cv_root_mean_squared_error', cv_training_and_metrics_task.outputs['root_mean_squared_error']),
('cv_metrics', cv_training_and_metrics_task.outputs['metrics']),
])
if __name__ == '__main__':
xgboost_train_and_cv_regression_on_csv_op = components.create_graph_component_from_pipeline_func(
xgboost_train_and_cv_regression_on_csv,
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Train_and_cross-validate_regression/from_CSV/component.yaml",
},
)
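# Usage sketch (illustration only; not part of this module). After the
# __main__ block above has written component.yaml, the graph component can be
# consumed like any single-step component; the upstream CSV-producing task
# mentioned in the comment is a hypothetical placeholder.
def _example_usage():  # sketch only; nothing in this module calls it
    from kfp import components

    train_and_cv_op = components.load_component_from_file('component.yaml')
    # Inside a @kfp.dsl.pipeline function one could then write, for example:
    #   task = train_and_cv_op(data=some_csv_task.output, label_column=0,
    #                          objective='reg:squarederror', num_iterations=200)
    #   model = task.outputs['model']
    #   cv_rmse = task.outputs['cv_root_mean_squared_error']
    return train_and_cv_op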
| 552 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train_and_cross-validate_regression | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train_and_cross-validate_regression/from_CSV/component.yaml | name: Xgboost train and cv regression on csv
inputs:
- {name: data, type: CSV}
- {name: label_column, type: Integer, default: '0', optional: true}
- {name: objective, type: String, default: 'reg:squarederror', optional: true}
- {name: num_iterations, type: Integer, default: '200', optional: true}
outputs:
- {name: model, type: XGBoostModel}
- {name: training_mean_absolute_error, type: Float}
- {name: training_mean_squared_error, type: Float}
- {name: training_root_mean_squared_error, type: Float}
- {name: training_metrics, type: JsonObject}
- {name: cv_mean_absolute_error, type: Float}
- {name: cv_mean_squared_error, type: Float}
- {name: cv_root_mean_squared_error, type: Float}
- {name: cv_metrics, type: JsonObject}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Train_and_cross-validate_regression/from_CSV/component.yaml'
implementation:
graph:
tasks:
Xgboost train:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
graphInput: {inputName: data}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
graphInput: {inputName: data}
model:
taskOutput: {outputName: model, taskId: Xgboost train, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Pandas Transform DataFrame in CSV format:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
graphInput: {inputName: data}
transform_code: df = df[["tips"]]
Remove header:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format, type: CSV}
Calculate regression metrics from csv:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict, type: Text}
Split table into folds:
componentRef: {digest: 9956223bcecc7294ca1afac39b60ada4a935a571d817c3dfbf2ea4a211afe3d1,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/e9b4b29b22a5120daf95b581b0392cd461a906f0/components/dataset_manipulation/split_data_into_folds/in_CSV/component.yaml'}
arguments:
table:
graphInput: {inputName: data}
Pandas Transform DataFrame in CSV format 2:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_3, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 2:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 2, type: CSV}
Xgboost train 2:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_1, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict 2:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_1, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 2, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Pandas Transform DataFrame in CSV format 3:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_2, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 3:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 3, type: CSV}
Xgboost train 3:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_4, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Pandas Transform DataFrame in CSV format 4:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_4, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 4:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 4, type: CSV}
Xgboost predict 3:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_4, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 3, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Calculate regression metrics from csv 2:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 4}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 3, type: Text}
Pandas Transform DataFrame in CSV format 5:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_1, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 5:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 5, type: CSV}
Calculate regression metrics from csv 3:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 5}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 2, type: Text}
Xgboost train 4:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_2, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict 4:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_2, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 4, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Calculate regression metrics from csv 4:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 3}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 4, type: Text}
Xgboost train 5:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_5, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict 5:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_5, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 5, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Xgboost train 6:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_3, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict 6:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_3, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 6, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Calculate regression metrics from csv 5:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 2}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 6, type: Text}
Pandas Transform DataFrame in CSV format 6:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_5, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 6:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 6, type: CSV}
Calculate regression metrics from csv 6:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 6}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 5, type: Text}
Aggregate regression metrics from csv:
componentRef: {digest: 3e128130521eff8d43764f3dcb037316cdd6490ad2878df5adef416f7c2f3c19,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7ea9363fe201918d419fecdc00d1275e657ff712/components/ml_metrics/Aggregate_regression_metrics/component.yaml'}
arguments:
metrics_1:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 3, type: JsonObject}
metrics_2:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 4, type: JsonObject}
metrics_3:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 5, type: JsonObject}
metrics_4:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 2, type: JsonObject}
metrics_5:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 6, type: JsonObject}
outputValues:
model:
taskOutput: {outputName: model, taskId: Xgboost train, type: XGBoostModel}
training_mean_absolute_error:
taskOutput: {outputName: mean_absolute_error, taskId: Calculate regression
metrics from csv, type: Float}
training_mean_squared_error:
taskOutput: {outputName: mean_squared_error, taskId: Calculate regression
metrics from csv, type: Float}
training_root_mean_squared_error:
taskOutput: {outputName: root_mean_squared_error, taskId: Calculate regression
metrics from csv, type: Float}
training_metrics:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics from
csv, type: JsonObject}
cv_mean_absolute_error:
taskOutput: {outputName: mean_absolute_error, taskId: Aggregate regression
metrics from csv, type: Float}
cv_mean_squared_error:
taskOutput: {outputName: mean_squared_error, taskId: Aggregate regression
metrics from csv, type: Float}
cv_root_mean_squared_error:
taskOutput: {outputName: root_mean_squared_error, taskId: Aggregate regression
metrics from csv, type: Float}
cv_metrics:
taskOutput: {outputName: metrics, taskId: Aggregate regression metrics from
csv, type: JsonObject}
| 553 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Cross_validation_for_regression | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Cross_validation_for_regression/from_CSV/component.py | from collections import OrderedDict
from kfp import components
split_table_into_folds_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e9b4b29b22a5120daf95b581b0392cd461a906f0/components/dataset_manipulation/split_data_into_folds/in_CSV/component.yaml')
xgboost_train_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
xgboost_predict_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
drop_header_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml')
calculate_regression_metrics_from_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml')
aggregate_regression_metrics_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/7ea9363fe201918d419fecdc00d1275e657ff712/components/ml_metrics/Aggregate_regression_metrics/component.yaml')
def xgboost_5_fold_cross_validation_for_regression(
data: 'CSV',
label_column: int = 0,
objective: str = 'reg:squarederror',
num_iterations: int = 200,
):
folds = split_table_into_folds_op(data).outputs
fold_metrics = {}
for i in range(1, 6):
training_data = folds['train_' + str(i)]
testing_data = folds['test_' + str(i)]
model = xgboost_train_on_csv_op(
training_data=training_data,
label_column=label_column,
objective=objective,
num_iterations=num_iterations,
).outputs['model']
predictions = xgboost_predict_on_csv_op(
data=testing_data,
model=model,
label_column=label_column,
).output
true_values_table = pandas_transform_csv_op(
table=testing_data,
transform_code='df = df[["tips"]]',
).output
true_values = drop_header_op(true_values_table).output
metrics = calculate_regression_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions,
).outputs['metrics']
fold_metrics['metrics_' + str(i)] = metrics
aggregated_metrics_task = aggregate_regression_metrics_op(**fold_metrics)
return OrderedDict([
('mean_absolute_error', aggregated_metrics_task.outputs['mean_absolute_error']),
('mean_squared_error', aggregated_metrics_task.outputs['mean_squared_error']),
('root_mean_squared_error', aggregated_metrics_task.outputs['root_mean_squared_error']),
('metrics', aggregated_metrics_task.outputs['metrics']),
])
if __name__ == '__main__':
xgboost_5_fold_cross_validation_for_regression_op = components.create_graph_component_from_pipeline_func(
xgboost_5_fold_cross_validation_for_regression,
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Cross_validation_for_regression/from_CSV/component.yaml",
},
)
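# Usage sketch (illustration only; not part of this module). Once serialized
# to component.yaml by the __main__ block above, the cross-validation graph
# component behaves like a regular op; the CSV-producing upstream task in the
# comment is a hypothetical placeholder.
def _example_usage():  # sketch only; nothing in this module calls it
    from kfp import components

    cv_op = components.load_component_from_file('component.yaml')
    # Inside a @kfp.dsl.pipeline function one could then write, for example:
    #   task = cv_op(data=some_csv_task.output, label_column=0,
    #                objective='reg:squarederror', num_iterations=200)
    #   rmse = task.outputs['root_mean_squared_error']
    return cv_op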
| 554 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Cross_validation_for_regression | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Cross_validation_for_regression/from_CSV/component.yaml | name: Xgboost 5 fold cross validation for regression
inputs:
- {name: data, type: CSV}
- {name: label_column, type: Integer, default: '0', optional: true}
- {name: objective, type: String, default: 'reg:squarederror', optional: true}
- {name: num_iterations, type: Integer, default: '200', optional: true}
outputs:
- {name: mean_absolute_error, type: Float}
- {name: mean_squared_error, type: Float}
- {name: root_mean_squared_error, type: Float}
- {name: metrics, type: JsonObject}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Cross_validation_for_regression/from_CSV/component.yaml'
implementation:
graph:
tasks:
Split table into folds:
componentRef: {digest: 9956223bcecc7294ca1afac39b60ada4a935a571d817c3dfbf2ea4a211afe3d1,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/e9b4b29b22a5120daf95b581b0392cd461a906f0/components/dataset_manipulation/split_data_into_folds/in_CSV/component.yaml'}
arguments:
table:
graphInput: {inputName: data}
Xgboost train:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_1, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_1, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Pandas Transform DataFrame in CSV format:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_1, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format, type: CSV}
Calculate regression metrics from csv:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict, type: Text}
Xgboost train 2:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_2, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict 2:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_2, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 2, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Pandas Transform DataFrame in CSV format 2:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_2, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 2:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 2, type: CSV}
Calculate regression metrics from csv 2:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 2}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 2, type: Text}
Xgboost train 3:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_3, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict 3:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_3, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 3, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Pandas Transform DataFrame in CSV format 3:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_3, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 3:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 3, type: CSV}
Calculate regression metrics from csv 3:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 3}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 3, type: Text}
Xgboost train 4:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_4, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict 4:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_4, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 4, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Pandas Transform DataFrame in CSV format 4:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_4, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 4:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 4, type: CSV}
Calculate regression metrics from csv 4:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 4}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 4, type: Text}
Xgboost train 5:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
taskOutput: {outputName: train_5, taskId: Split table into folds, type: CSV}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict 5:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
taskOutput: {outputName: test_5, taskId: Split table into folds, type: CSV}
model:
taskOutput: {outputName: model, taskId: Xgboost train 5, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Pandas Transform DataFrame in CSV format 5:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
taskOutput: {outputName: test_5, taskId: Split table into folds, type: CSV}
transform_code: df = df[["tips"]]
Remove header 5:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format 5, type: CSV}
Calculate regression metrics from csv 5:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header 5}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict 5, type: Text}
Aggregate regression metrics from csv:
componentRef: {digest: 3e128130521eff8d43764f3dcb037316cdd6490ad2878df5adef416f7c2f3c19,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7ea9363fe201918d419fecdc00d1275e657ff712/components/ml_metrics/Aggregate_regression_metrics/component.yaml'}
arguments:
metrics_1:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv, type: JsonObject}
metrics_2:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 2, type: JsonObject}
metrics_3:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 3, type: JsonObject}
metrics_4:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 4, type: JsonObject}
metrics_5:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics
from csv 5, type: JsonObject}
outputValues:
mean_absolute_error:
taskOutput: {outputName: mean_absolute_error, taskId: Aggregate regression
metrics from csv, type: Float}
mean_squared_error:
taskOutput: {outputName: mean_squared_error, taskId: Aggregate regression
metrics from csv, type: Float}
root_mean_squared_error:
taskOutput: {outputName: root_mean_squared_error, taskId: Aggregate regression
metrics from csv, type: Float}
metrics:
taskOutput: {outputName: metrics, taskId: Aggregate regression metrics from
csv, type: JsonObject}
| 555 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train_regression_and_calculate_metrics | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train_regression_and_calculate_metrics/from_CSV/component.py | from collections import OrderedDict
from kfp import components
xgboost_train_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
xgboost_predict_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
drop_header_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml')
calculate_regression_metrics_from_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml')
def xgboost_train_regression_and_calculate_metrics_on_csv(
training_data: 'CSV',
testing_data: 'CSV',
label_column: int = 0,
objective: str = 'reg:squarederror',
num_iterations: int = 200,
):
model = xgboost_train_on_csv_op(
training_data=training_data,
label_column=label_column,
objective=objective,
num_iterations=num_iterations,
).outputs['model']
predictions = xgboost_predict_on_csv_op(
data=testing_data,
model=model,
label_column=label_column,
).output
true_values_table = pandas_transform_csv_op(
table=testing_data,
transform_code='df = df[["tips"]]',
).output
true_values = drop_header_op(true_values_table).output
metrics_task = calculate_regression_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions,
)
return OrderedDict([
('model', model),
('mean_absolute_error', metrics_task.outputs['mean_absolute_error']),
('mean_squared_error', metrics_task.outputs['mean_squared_error']),
('root_mean_squared_error', metrics_task.outputs['root_mean_squared_error']),
('metrics', metrics_task.outputs['metrics']),
])
if __name__ == '__main__':
xgboost_train_regression_and_calculate_metrics_on_csv_op = components.create_graph_component_from_pipeline_func(
xgboost_train_regression_and_calculate_metrics_on_csv,
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Train_regression_and_calculate_metrics/from_CSV/component.yaml",
},
)
| 556 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train_regression_and_calculate_metrics | kubeflow_public_repos/pipelines/components/contrib/XGBoost/Train_regression_and_calculate_metrics/from_CSV/component.yaml | name: Xgboost train regression and calculate metrics on csv
inputs:
- {name: training_data, type: CSV}
- {name: testing_data, type: CSV}
- {name: label_column, type: Integer, default: '0', optional: true}
- {name: objective, type: String, default: 'reg:squarederror', optional: true}
- {name: num_iterations, type: Integer, default: '200', optional: true}
outputs:
- {name: model, type: XGBoostModel}
- {name: mean_absolute_error, type: Float}
- {name: mean_squared_error, type: Float}
- {name: root_mean_squared_error, type: Float}
- {name: metrics, type: JsonObject}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/XGBoost/Train_regression_and_calculate_metrics/from_CSV/component.yaml'
implementation:
graph:
tasks:
Xgboost train:
componentRef: {digest: 09b80053da29f8f51575b42e5d2e8ad4b7bdcc92a02c3744e189b1f597006b38,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml'}
arguments:
training_data:
graphInput: {inputName: training_data}
label_column:
graphInput: {inputName: label_column}
num_iterations:
graphInput: {inputName: num_iterations}
objective:
graphInput: {inputName: objective}
Xgboost predict:
componentRef: {digest: ecdfaf32cff15b6abc3d0dd80365ce00577f1a19a058fbe201f515431cea1357,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml'}
arguments:
data:
graphInput: {inputName: testing_data}
model:
taskOutput: {outputName: model, taskId: Xgboost train, type: XGBoostModel}
label_column:
graphInput: {inputName: label_column}
Pandas Transform DataFrame in CSV format:
componentRef: {digest: 58dc88349157bf128021708c316ce4eb60bc1de0a5a7dd3af45fabac3276d510,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'}
arguments:
table:
graphInput: {inputName: testing_data}
transform_code: df = df[["tips"]]
Remove header:
componentRef: {digest: ba35ffea863855b956c3c50aefa0420ba3823949a6c059e6e3971cde960dc5a3,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml'}
arguments:
table:
taskOutput: {outputName: transformed_table, taskId: Pandas Transform DataFrame
in CSV format, type: CSV}
Calculate regression metrics from csv:
componentRef: {digest: e3ecbfeb18032820edfee4255e2fb6d15d15ed224e166519d5e528e12053a995,
url: 'https://raw.githubusercontent.com/kubeflow/pipelines/7da1ac9464b4b3e7d95919faa2f1107a9635b7e4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml'}
arguments:
true_values:
taskOutput: {outputName: table, taskId: Remove header}
predicted_values:
taskOutput: {outputName: predictions, taskId: Xgboost predict, type: Text}
outputValues:
model:
taskOutput: {outputName: model, taskId: Xgboost train, type: XGBoostModel}
mean_absolute_error:
taskOutput: {outputName: mean_absolute_error, taskId: Calculate regression
metrics from csv, type: Float}
mean_squared_error:
taskOutput: {outputName: mean_squared_error, taskId: Calculate regression
metrics from csv, type: Float}
root_mean_squared_error:
taskOutput: {outputName: root_mean_squared_error, taskId: Calculate regression
metrics from csv, type: Float}
metrics:
taskOutput: {outputName: metrics, taskId: Calculate regression metrics from
csv, type: JsonObject}
| 557 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost | kubeflow_public_repos/pipelines/components/contrib/XGBoost/_samples/recursive_training.py | #!/usr/bin/env python3
# This sample demonstrates continuous training using a train-eval-check recursive loop.
# The main pipeline trains the initial model and then gradually trains the model
# some more until the model evaluation metrics are good enough.
import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
xgboost_train_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
xgboost_predict_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
drop_header_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/02c9638287468c849632cf9f7885b51de4c66f86/components/tables/Remove_header/component.yaml')
calculate_regression_metrics_from_csv_op = kfp.components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/616542ac0f789914f4eb53438da713dd3004fba4/components/ml_metrics/Calculate_regression_metrics/from_CSV/component.yaml')
# This recursive sub-pipeline trains a model, evaluates it, calculates the metrics and checks them.
# If the model error is too high, then more training is performed until the model is good.
@kfp.dsl.graph_component
def train_until_low_error(starting_model, training_data, true_values):
# Training
model = xgboost_train_on_csv_op(
training_data=training_data,
starting_model=starting_model,
label_column=0,
objective='reg:squarederror',
num_iterations=50,
).outputs['model']
# Predicting
predictions = xgboost_predict_on_csv_op(
data=training_data,
model=model,
label_column=0,
).output
# Calculating the regression metrics
metrics_task = calculate_regression_metrics_from_csv_op(
true_values=true_values,
predicted_values=predictions,
)
# Checking the metrics
with kfp.dsl.Condition(metrics_task.outputs['mean_squared_error'] > 0.01):
# Training some more
train_until_low_error(
starting_model=model,
training_data=training_data,
true_values=true_values,
)
# The main pipeline trains the initial model and then gradually trains the model some more until the model evaluation metrics are good enough.
def train_until_good_pipeline():
# Preparing the training data
training_data = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).output
# Preparing the true values
true_values_table = pandas_transform_csv_op(
table=training_data,
transform_code='df = df[["tips"]]',
).output
true_values = drop_header_op(true_values_table).output
# Initial model training
first_model = xgboost_train_on_csv_op(
training_data=training_data,
label_column=0,
objective='reg:squarederror',
num_iterations=100,
).outputs['model']
# Recursively training until the error becomes low
train_until_low_error(
starting_model=first_model,
training_data=training_data,
true_values=true_values,
)
if __name__ == '__main__':
kfp_endpoint=None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(train_until_good_pipeline, arguments={})
| 558 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost | kubeflow_public_repos/pipelines/components/contrib/XGBoost/_samples/sample_pipeline.py | import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
convert_csv_to_apache_parquet_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0d7d6f41c92bdc05c2825232afe2b47e5cb6c4b3/components/_converters/ApacheParquet/from_CSV/component.yaml')
xgboost_train_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Train/component.yaml')
xgboost_predict_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/567c04c51ff00a1ee525b3458425b17adbe3df61/components/XGBoost/Predict/component.yaml')
xgboost_train_on_parquet_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0ae2f30ff24beeef1c64cc7c434f1f652c065192/components/XGBoost/Train/from_ApacheParquet/component.yaml')
xgboost_predict_on_parquet_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/0ae2f30ff24beeef1c64cc7c434f1f652c065192/components/XGBoost/Predict/from_ApacheParquet/component.yaml')
def xgboost_pipeline():
training_data_csv = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).output
# Training and prediction on dataset in CSV format
model_trained_on_csv = xgboost_train_on_csv_op(
training_data=training_data_csv,
label_column=0,
objective='reg:squarederror',
num_iterations=200,
).outputs['model']
xgboost_predict_on_csv_op(
data=training_data_csv,
model=model_trained_on_csv,
label_column=0,
)
# Training and prediction on dataset in Apache Parquet format
training_data_parquet = convert_csv_to_apache_parquet_op(
training_data_csv
).output
model_trained_on_parquet = xgboost_train_on_parquet_op(
training_data=training_data_parquet,
label_column_name='tips',
objective='reg:squarederror',
num_iterations=200,
).outputs['model']
xgboost_predict_on_parquet_op(
data=training_data_parquet,
model=model_trained_on_parquet,
label_column_name='tips',
)
# Checking cross-format predictions
xgboost_predict_on_parquet_op(
data=training_data_parquet,
model=model_trained_on_csv,
label_column_name='tips',
)
xgboost_predict_on_csv_op(
data=training_data_csv,
model=model_trained_on_parquet,
label_column=0,
)
if __name__ == '__main__':
kfp_endpoint=None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(xgboost_pipeline, arguments={})
| 559 |
0 | kubeflow_public_repos/pipelines/components/contrib/XGBoost | kubeflow_public_repos/pipelines/components/contrib/XGBoost/_samples/training_with_cross_validation.py | # cross_validation_pipeline compact
import kfp
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
xgboost_train_and_cv_regression_on_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/1a11ce2aea5243cdcc2b4721675303f78f49ca21/components/XGBoost/Train_and_cross-validate_regression/from_CSV/component.yaml')
def cross_validation_pipeline(
label_column: int = 0,
objective: str = 'reg:squarederror',
num_iterations: int = 200,
):
data = chicago_taxi_dataset_op(
where='trip_start_timestamp >= "{}" AND trip_start_timestamp < "{}"'.format('2019-01-01', '2019-02-01'),
select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
limit=10000,
).output
xgboost_train_and_cv_regression_on_csv_op(
data=data,
label_column=label_column,
objective=objective,
num_iterations=num_iterations,
)
if __name__ == '__main__':
kfp_endpoint=None
kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(
cross_validation_pipeline,
arguments={},
)
| 560 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer/README.md | # Deployer of OpenVINO Model Server
This component triggers deployment of [OpenVINO Model Server](https://github.com/IntelAI/OpenVINO-model-server) in Kubernetes.
It applies the passed component parameters to a Jinja template and then applies the rendered Deployment and Service records to the cluster.
```bash
./deploy.sh
--model-export-path
--cluster-name
--namespace
--server-name
--replicas
--batch-size
--model-version-policy
--log-level
```
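
For reference, a deployment could be triggered with an invocation along these lines; the model path, server name and other values below are illustrative placeholders, not defaults:

```bash
./deploy.sh \
  --model-export-path gs://<your-bucket>/models/resnet \
  --server-name resnet \
  --namespace kubeflow \
  --replicas 2 \
  --batch-size 1 \
  --log-level INFO
```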
## building docker image
```bash
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy .
```
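
If the image will be pushed to a registry and referenced from a pipeline, it is convenient to tag it at build time; the registry and tag below are assumptions rather than project defaults:

```bash
docker build -t <your-registry>/ovms-deployer:latest \
  --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy .
```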
## testing the image locally
``` | 561 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer/containers/ovms.j2 | ---
apiVersion: apps/v1
kind: Deployment
metadata:
name: ovms-{{ SERVER_NAME }}
labels:
app: ovms-{{ SERVER_NAME }}
spec:
selector:
matchLabels:
app: ovms-{{ SERVER_NAME }}
replicas: {{ REPLICAS }}
template:
metadata:
labels:
app: ovms-{{ SERVER_NAME }}
spec:
containers:
- name: ovms-{{ SERVER_NAME }}
image: intelaipg/openvino-model-server:latest
ports:
- containerPort: 80
env:
- name: LOG_LEVEL
value: "{{ LOG_LEVEL }}"
command: ["/ie-serving-py/start_server.sh"]
args: ["ie_serving", "model", "--model_path", "{{ MODEL_EXPORT_PATH }}", "--model_name", "{{ SERVER_NAME }}", "--port", "80", "--batch_size", "{{ BATCH_SIZE }}"]
---
apiVersion: v1
kind: Service
metadata:
name: ovms-{{ SERVER_NAME }}
spec:
selector:
app: ovms-{{ SERVER_NAME }}
ports:
- protocol: TCP
port: 80
targetPort: 80
| 562 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer/containers/apply_template.py | from jinja2 import Template
import os
f = open("ovms.j2","r")
ovms_template = f.read()
t = Template(ovms_template)
ovms_k8s = t.render(os.environ)
f.close
f = open("ovms.yaml", "w")
f.write(ovms_k8s)
f.close
print(ovms_k8s) | 563 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer/containers/requirements.txt | jinja2==2.11.3
futures==3.1.1
tensorflow-serving-api==1.13.0
| 564 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer/containers/classes.py | imagenet_classes = {0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'}
| 565 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer/containers/Dockerfile | FROM intelpython/intelpython3_core
RUN apt-get update -q && apt-get upgrade -y && \
apt-get install -y -qq --no-install-recommends \
apt-transport-https \
ca-certificates \
git \
gnupg \
lsb-release \
unzip \
wget && \
wget --no-verbose -O /opt/ks_0.12.0_linux_amd64.tar.gz \
https://github.com/ksonnet/ksonnet/releases/download/v0.12.0/ks_0.12.0_linux_amd64.tar.gz && \
tar -C /opt -xzf /opt/ks_0.12.0_linux_amd64.tar.gz && \
cp /opt/ks_0.12.0_linux_amd64/ks /bin/. && \
rm -f /opt/ks_0.12.0_linux_amd64.tar.gz && \
wget --no-verbose -O /bin/kubectl \
https://storage.googleapis.com/kubernetes-release/release/v1.11.2/bin/linux/amd64/kubectl && \
chmod u+x /bin/kubectl && \
wget --no-verbose -O /opt/kubernetes_v1.11.2 \
https://github.com/kubernetes/kubernetes/archive/v1.11.2.tar.gz && \
mkdir -p /src && \
tar -C /src -xzf /opt/kubernetes_v1.11.2 && \
rm -rf /opt/kubernetes_v1.11.2 && \
wget --no-verbose -O /opt/google-apt-key.gpg \
https://packages.cloud.google.com/apt/doc/apt-key.gpg && \
apt-key add /opt/google-apt-key.gpg && \
export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \
echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" >> \
/etc/apt/sources.list.d/google-cloud-sdk.list && \
apt-get update -q && \
apt-get install -y -qq --no-install-recommends google-cloud-sdk && \
gcloud config set component_manager/disable_update_check true
RUN conda install -y opencv && conda clean -a -y
ADD requirements.txt /deploy/
WORKDIR /deploy
RUN pip install -r requirements.txt
ADD apply_template.py deploy.sh evaluate.py ovms.j2 classes.py /deploy/
ENTRYPOINT ["./deploy.sh"]
| 566 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer/containers/evaluate.py | #!/usr/bin/env python
import grpc
import numpy as np
import tensorflow.contrib.util as tf_contrib_util
import datetime
import argparse
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from urllib.parse import urlparse
import requests
import cv2
import os
import json
import classes
def crop_resize(img,cropx,cropy):
y,x,c = img.shape
if y < cropy:
img = cv2.resize(img, (x, cropy))
y = cropy
if x < cropx:
img = cv2.resize(img, (cropx,y))
x = cropx
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx,:]
def get_file_content(source_path):
parsed_path = urlparse(source_path)
if parsed_path.scheme == "http" or parsed_path.scheme == "https":
try:
response = requests.get(source_path, stream=True)
content = response.content
except requests.exceptions.RequestException as e:
print(e)
content = None
elif parsed_path.scheme == "":
if os.path.isfile(source_path):
with open(input_images) as f:
content = f.readlines()
f.close
else:
print("file " + source_path + "is not accessible")
content = None
return content
def getJpeg(path, size, path_prefix):
print(os.path.join(path_prefix,path))
content = get_file_content(os.path.join(path_prefix,path))
if content:
try:
img = np.frombuffer(content, dtype=np.uint8)
img = cv2.imdecode(img, cv2.IMREAD_COLOR) # BGR format
            # retrieved array has BGR format and 0-255 normalization
# add image preprocessing if needed by the model
img = crop_resize(img, size, size)
img = img.astype('float32')
img = img.transpose(2,0,1).reshape(1,3,size,size)
print(path, img.shape, "; data range:",np.amin(img),":",np.amax(img))
except Exception as e:
print("Can not read the image file", e)
img = None
else:
print("Can not open ", os.path(path_prefix,path))
img = None
return img
parser = argparse.ArgumentParser(description='Sends requests to OVMS and TF Serving using images in numpy format')
parser.add_argument('--images_list', required=False, default='input_images.txt', help='Path to a file with a list of labeled images. It should include in every line a path to the image file and a numerical label separate by space.')
parser.add_argument('--grpc_endpoint',required=False, default='localhost:9000', help='Specify endpoint of grpc service. default:localhost:9000')
parser.add_argument('--input_name',required=False, default='input', help='Specify input tensor name. default: input')
parser.add_argument('--output_name',required=False, default='resnet_v1_50/predictions/Reshape_1', help='Specify output tensor name. default: resnet_v1_50/predictions/Reshape_1')
parser.add_argument('--model_name', default='resnet', help='Define model name, must be same as is in service. default: resnet',
dest='model_name')
parser.add_argument('--size',required=False, default=224, type=int, help='The size of the image in the model')
parser.add_argument('--image_path_prefix',required=False, default="", type=str, help='Path prefix to be added to every image in the list')
args = vars(parser.parse_args())
channel = grpc.insecure_channel(args['grpc_endpoint'])
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
input_images = args.get('images_list')
size = args.get('size')
input_list_content = get_file_content(input_images)
if input_list_content is None:
print("Can not open input images file", input_images)
exit(1)
else:
lines = input_list_content.decode().split("\n")
print(lines)
print('Start processing:')
print('\tModel name: {}'.format(args.get('model_name')))
print('\tImages list file: {}'.format(args.get('images_list')))
i = 0
matched = 0
processing_times = np.zeros((0),int)
imgs = np.zeros((0,3,size, size), np.dtype('<f'))
lbs = np.zeros((0), int)
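# iterate over the labeled image list: send one request per image to the serving endpoint and collect latency and accuracy statistics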
for line in lines:
    line = line.strip()
    if not line:
        continue  # skip empty lines, e.g. the trailing newline in the list file
    path, label = line.split(" ")
img = getJpeg(path, size, args.get('image_path_prefix'))
if img is not None:
request = predict_pb2.PredictRequest()
request.model_spec.name = args.get('model_name')
request.inputs[args['input_name']].CopyFrom(tf_contrib_util.make_tensor_proto(img, shape=(img.shape)))
start_time = datetime.datetime.now()
result = stub.Predict(request, 10.0) # result includes a dictionary with all model outputs
end_time = datetime.datetime.now()
if args['output_name'] not in result.outputs:
print("Invalid output name", args['output_name'])
print("Available outputs:")
for Y in result.outputs:
print(Y)
exit(1)
duration = (end_time - start_time).total_seconds() * 1000
processing_times = np.append(processing_times,np.array([int(duration)]))
output = tf_contrib_util.make_ndarray(result.outputs[args['output_name']])
nu = np.array(output)
# for object classification models show imagenet class
        print('Processing time: {:.2f} ms; speed {:.2f} fps'.format(
            round(duration, 2), round(1000 / duration, 2)))
ma = np.argmax(nu)
if int(label) == ma:
matched += 1
i += 1
print("Detected: {} - {} ; Should be: {} - {}".format(ma,classes.imagenet_classes[int(ma)],label,classes.imagenet_classes[int(label)]))
accuracy = matched/i
latency = np.average(processing_times)
metrics = {'metrics': [{'name': 'accuracy-score','numberValue': accuracy,'format': "PERCENTAGE"},
{'name': 'latency','numberValue': latency,'format': "RAW"}]}
with open('/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
print("\nOverall accuracy=",matched/i*100,"%")
print("Average latency=",latency,"ms")
| 567 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer | kubeflow_public_repos/pipelines/components/contrib/openvino/ovms-deployer/containers/deploy.sh | #!/bin/bash -e
set -x
KUBERNETES_NAMESPACE="${KUBERNETES_NAMESPACE:-kubeflow}"
SERVER_NAME="${SERVER_NAME:-model-server}"
SERVER_ENDPOINT_OUTPUT_FILE="${SERVER_ENDPOINT_OUTPUT_FILE:-/tmp/server_endpoint/data}"
while (($#)); do
case $1 in
"--model-export-path")
shift
export MODEL_EXPORT_PATH="$1"
shift
;;
"--cluster-name")
shift
CLUSTER_NAME="$1"
shift
;;
"--namespace")
shift
KUBERNETES_NAMESPACE="$1"
shift
;;
"--server-name")
shift
SERVER_NAME="$1"
shift
;;
"--replicas")
shift
export REPLICAS="$1"
shift
;;
"--batch-size")
shift
export BATCH_SIZE="$1"
shift
;;
"--model-version-policy")
shift
export MODEL_VERSION_POLICY="$1"
shift
;;
"--log-level")
shift
export LOG_LEVEL="$1"
shift
;;
"--server-endpoint-output-file")
shift
      SERVER_ENDPOINT_OUTPUT_FILE="$1"
shift
;;
*)
echo "Unknown argument: '$1'"
exit 1
;;
esac
done
if [ -z "${MODEL_EXPORT_PATH}" ]; then
echo "You must specify a path to the saved model"
exit 1
fi
echo "Deploying the model '${MODEL_EXPORT_PATH}'"
if [ -z "${CLUSTER_NAME}" ]; then
CLUSTER_NAME=$(wget -q -O- --header="Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name)
fi
# Ensure the server name is not more than 63 characters.
export SERVER_NAME="${SERVER_NAME:0:63}"
# Trim any trailing hyphens from the server name.
while [[ "${SERVER_NAME:(-1)}" == "-" ]]; do SERVER_NAME="${SERVER_NAME::-1}"; done
echo "Deploying ${SERVER_NAME} to the cluster ${CLUSTER_NAME}"
# Connect kubectl to the local cluster
kubectl config set-cluster "${CLUSTER_NAME}" --server=https://kubernetes.default --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
kubectl config set-credentials pipeline --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
kubectl config set-context kubeflow --cluster "${CLUSTER_NAME}" --user pipeline
kubectl config use-context kubeflow
echo "Generating service and deployment yaml files"
python apply_template.py
kubectl apply -f ovms.yaml
sleep 10
echo "Waiting for the TF Serving deployment to have at least one available replica..."
timeout="1000"
start_time=`date +%s`
while [[ $(kubectl get deploy --namespace "${KUBERNETES_NAMESPACE}" --selector=app="ovms-${SERVER_NAME}" --output=jsonpath='{.items[0].status.availableReplicas}') < "1" ]]; do
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 5
done
echo "Obtaining the pod name..."
start_time=`date +%s`
pod_name=""
while [[ $pod_name == "" ]];do
pod_name=$(kubectl get pods --namespace "${KUBERNETES_NAMESPACE}" --selector=app="ovms-${SERVER_NAME}" --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}')
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
echo "Pod name is: " $pod_name
# Wait for the pod container to start running
echo "Waiting for the TF Serving pod to start running..."
start_time=`date +%s`
exit_code="1"
while [[ $exit_code != "0" ]];do
kubectl get po ${pod_name} --namespace "${KUBERNETES_NAMESPACE}" -o jsonpath='{.status.containerStatuses[0].state.running}'
exit_code=$?
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 2
done
start_time=`date +%s`
while [ -z "$(kubectl get po ${pod_name} --namespace "${KUBERNETES_NAMESPACE}" -o jsonpath='{.status.containerStatuses[0].state.running}')" ]; do
current_time=`date +%s`
elapsed_time=$(expr $current_time + 1 - $start_time)
if [[ $elapsed_time -gt $timeout ]];then
echo "timeout"
exit 1
fi
sleep 5
done
# Wait a little while and then grab the logs of the running server
sleep 10
echo "Logs from the TF Serving pod:"
kubectl logs ${pod_name} --namespace "${KUBERNETES_NAMESPACE}"
mkdir -p "$(dirname "$SERVER_ENDPOINT_OUTPUT_FILE")"
echo "ovms-${SERVER_NAME}:80" > "$SERVER_ENDPOINT_OUTPUT_FILE"
| 568 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino | kubeflow_public_repos/pipelines/components/contrib/openvino/tf-slim/README.md | # Slim models generator
This component automates the export of [slim models](https://github.com/tensorflow/models/blob/master/research/slim).
It can create a graph from the slim models zoo, load the variables from a pre-trained checkpoint and export the model in the form
of a TensorFlow `frozen graph` and `saved model`.
The results of the component can be saved to a local path or to GCS cloud storage. They can be used by other ML pipeline
components such as the OpenVINO model optimizer, OpenVINO predict or OpenVINO Model Server.
## Building
```bash
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy .
```
## Using the component
```bash
python slim_model.py --help
usage: slim_model.py [-h] [--model_name MODEL_NAME] [--export_dir EXPORT_DIR]
[--batch_size BATCH_SIZE]
[--checkpoint_url CHECKPOINT_URL]
[--num_classes NUM_CLASSES]
Slim model generator
optional arguments:
-h, --help show this help message and exit
--model_name MODEL_NAME
--export_dir EXPORT_DIR
GCS or local path to save the generated model
--batch_size BATCH_SIZE
batch size to be used in the exported model
--checkpoint_url CHECKPOINT_URL
URL to the pretrained compressed checkpoint
--num_classes NUM_CLASSES
number of model classes
```
*Model name* can be any model defined in the slim repository. The naming convention needs to match the key name from
[nets_factory.py](https://github.com/tensorflow/models/blob/master/research/slim/nets/nets_factory.py#L39).
*export dir* can be a local path in the container or a GCS path where the generated files are stored:
- model graph file in pb format
- frozen graph including weights from the provided checkpoint
- event file which can be imported in tensorboard
- saved model which will be stored in a subfolder called `1`.
*batch size* represents the batch size used in the exported models. It can be a natural number for a fixed batch size,
or `-1` for a dynamic batch size.
*checkpoint_url* is the URL to a pre-trained checkpoint listed at https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models
It must match the model specified in the model_name parameter.
*num classes* is the model-specific number of classes in the model output. For slim models it should be a value
of `1000` or `1001`. It must match the number of classes used by the requested model.
## Examples
```
python slim_model.py --model_name mobilenet_v1_050 --export_dir /tmp/mobilnet \
--batch_size 1 --num_classes=1001 \
--checkpoint_url http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160.tgz
python slim_model.py --model_name resnet_v1_50 --export_dir gs://<bucket_name>/resnet \
--batch_size -1 --num_classes=1000 \
--checkpoint_url http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz
python slim_model.py --model_name inception_v4 --export_dir gs://<bucket_name>/inception \
--batch_size -1 --num_classes=1001 \
--checkpoint_url http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz
python slim_model.py --model_name vgg_19 --export_dir /tmp/vgg \
--batch_size 1 --num_classes=1000 \
--checkpoint_url http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz
```
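The exported artifacts can be smoke-tested before they are passed to downstream components. Below is a minimal sketch, assuming TensorFlow 1.x is installed and that the saved model was written to `/tmp/mobilnet/1` (a hypothetical path matching the `--export_dir` used in the first example above):
```python
import tensorflow as tf
# hypothetical location of the exported saved model; adjust to your --export_dir
saved_model_dir = "/tmp/mobilnet/1"
with tf.Session(graph=tf.Graph()) as sess:
    # load the saved model using the "serve" tag written by the component
    meta_graph = tf.saved_model.loader.load(sess, ["serve"], saved_model_dir)
    # print the serving signature to confirm the input and output tensor names
    print(meta_graph.signature_def["serving_default"])
```
Alternatively, `saved_model_cli show --dir /tmp/mobilnet/1 --all` prints the same signature information from the command line.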
| 569 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/tf-slim | kubeflow_public_repos/pipelines/components/contrib/openvino/tf-slim/containers/slim_model.py | import tensorflow as tf
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from nets import nets_factory
from tensorflow.python.platform import gfile
import argparse
import validators
import os
import requests
import tarfile
from subprocess import Popen, PIPE
import shutil
import glob
import re
import json
from tensorflow.python.tools.freeze_graph import freeze_graph
from tensorflow.python.tools.saved_model_cli import _show_all
from urllib.parse import urlparse
from shutil import copyfile
from google.cloud import storage
def upload_to_gcs(src, dst):
parsed_path = urlparse(dst)
bucket_name = parsed_path.netloc
file_path = parsed_path.path[1:]
gs_client = storage.Client()
bucket = gs_client.get_bucket(bucket_name)
blob = bucket.blob(file_path)
blob.upload_from_filename(src)
def main():
parser = argparse.ArgumentParser(
description='Slim model generator')
parser.add_argument('--model_name', type=str,
help='')
parser.add_argument('--export_dir', type=str, default="/tmp/export_dir",
help='GCS or local path to save graph files')
parser.add_argument('--saved_model_dir', type=str,
help='GCS or local path to save the generated model')
parser.add_argument('--batch_size', type=str, default=1,
help='batch size to be used in the exported model')
parser.add_argument('--checkpoint_url', type=str,
help='URL to the pretrained compressed checkpoint')
parser.add_argument('--num_classes', type=int, default=1000,
help='number of model classes')
args = parser.parse_args()
MODEL = args.model_name
URL = args.checkpoint_url
if not validators.url(args.checkpoint_url):
print('use a valid URL parameter')
exit(1)
TMP_DIR = "/tmp/slim_tmp"
NUM_CLASSES = args.num_classes
BATCH_SIZE = args.batch_size
MODEL_FILE_NAME = URL.rsplit('/', 1)[-1]
EXPORT_DIR = args.export_dir
SAVED_MODEL_DIR = args.saved_model_dir
tmp_graph_file = os.path.join(TMP_DIR, MODEL + '_graph.pb')
export_graph_file = os.path.join(EXPORT_DIR, MODEL + '_graph.pb')
frozen_file = os.path.join(EXPORT_DIR, 'frozen_graph_' + MODEL + '.pb')
if not os.path.exists(TMP_DIR):
os.makedirs(TMP_DIR)
if not os.path.exists(TMP_DIR + '/' + MODEL_FILE_NAME):
print("Downloading and decompressing the model checkpoint...")
response = requests.get(URL, stream=True)
with open(os.path.join(TMP_DIR, MODEL_FILE_NAME), 'wb') as output:
output.write(response.content)
tar = tarfile.open(os.path.join(TMP_DIR, MODEL_FILE_NAME))
tar.extractall(path=TMP_DIR)
tar.close()
print("Model checkpoint downloaded and decompressed to:", TMP_DIR)
else:
print("Reusing existing model file ",
os.path.join(TMP_DIR, MODEL_FILE_NAME))
checkpoint = glob.glob(TMP_DIR + '/*.ckpt*')
print("checkpoint", checkpoint)
if len(checkpoint) > 0:
m = re.match(r"([\S]*.ckpt)", checkpoint[-1])
print("checkpoint match", m)
checkpoint = m[0]
print(checkpoint)
else:
print("checkpoint file not detected in " + URL)
exit(1)
print("Saving graph def file")
with tf.Graph().as_default() as graph:
network_fn = nets_factory.get_network_fn(MODEL,
num_classes=NUM_CLASSES,
is_training=False)
image_size = network_fn.default_image_size
if BATCH_SIZE == "None" or BATCH_SIZE == "-1":
batchsize = None
else:
            batchsize = int(BATCH_SIZE)  # --batch_size is parsed as a string; convert it for the placeholder shape
placeholder = tf.placeholder(name='input', dtype=tf.float32,
shape=[batchsize, image_size,
image_size, 3])
network_fn(placeholder)
graph_def = graph.as_graph_def()
with gfile.GFile(tmp_graph_file, 'wb') as f:
f.write(graph_def.SerializeToString())
if urlparse(EXPORT_DIR).scheme == 'gs':
upload_to_gcs(tmp_graph_file, export_graph_file)
elif urlparse(EXPORT_DIR).scheme == '':
if not os.path.exists(EXPORT_DIR):
os.makedirs(EXPORT_DIR)
copyfile(tmp_graph_file, export_graph_file)
else:
print("Invalid format of model export path")
print("Graph file saved to ",
os.path.join(EXPORT_DIR, MODEL + '_graph.pb'))
print("Analysing graph")
p = Popen("./summarize_graph --in_graph=" + tmp_graph_file +
" --print_structure=false", shell=True, stdout=PIPE, stderr=PIPE)
summary, err = p.communicate()
inputs = []
outputs = []
for line in summary.split(b'\n'):
line_str = line.decode()
if re.match(r"Found [\d]* possible inputs", line_str) is not None:
print("in", line)
m = re.findall(r'name=[\S]*,', line.decode())
for match in m:
print("match", match)
input = match[5:-1]
inputs.append(input)
print("inputs", inputs)
if re.match(r"Found [\d]* possible outputs", line_str) is not None:
print("out", line)
m = re.findall(r'name=[\S]*,', line_str)
for match in m:
print("match", match)
output = match[5:-1]
outputs.append(output)
print("outputs", outputs)
output_node_names = ",".join(outputs)
print("Creating freezed graph based on pretrained checkpoint")
freeze_graph(input_graph=tmp_graph_file,
input_checkpoint=checkpoint,
input_binary=True,
clear_devices=True,
input_saver='',
output_node_names=output_node_names,
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
output_graph=frozen_file,
initializer_nodes="")
if urlparse(SAVED_MODEL_DIR).scheme == '' and \
os.path.exists(SAVED_MODEL_DIR):
shutil.rmtree(SAVED_MODEL_DIR)
builder = tf.saved_model.builder.SavedModelBuilder(SAVED_MODEL_DIR)
with tf.gfile.GFile(frozen_file, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sigs = {}
with tf.Session(graph=tf.Graph()) as sess:
tf.import_graph_def(graph_def, name="")
g = tf.get_default_graph()
inp_dic = {}
for inp in inputs:
inp_t = g.get_tensor_by_name(inp+":0")
inp_dic[inp] = inp_t
out_dic = {}
for out in outputs:
out_t = g.get_tensor_by_name(out+":0")
out_dic[out] = out_t
sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
tf.saved_model.signature_def_utils.predict_signature_def(
inp_dic, out_dic)
builder.add_meta_graph_and_variables(sess, [tag_constants.SERVING],
signature_def_map=sigs)
print("Exporting saved model to:", SAVED_MODEL_DIR + ' ...')
builder.save()
print("Saved model exported to:", SAVED_MODEL_DIR)
_show_all(SAVED_MODEL_DIR)
pb_visual_writer = tf.summary.FileWriter(SAVED_MODEL_DIR)
pb_visual_writer.add_graph(sess.graph)
print("Visualize the model by running: "
"tensorboard --logdir={}".format(EXPORT_DIR))
with open('/tmp/saved_model_dir.txt', 'w') as f:
f.write(SAVED_MODEL_DIR)
with open('/tmp/export_dir.txt', 'w') as f:
f.write(EXPORT_DIR)
artifacts = {"version": 1,"outputs": [
{
"type": "tensorboard",
"source": SAVED_MODEL_DIR
}
]
}
with open('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(artifacts, f)
if __name__ == "__main__":
main()
| 570 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/tf-slim | kubeflow_public_repos/pipelines/components/contrib/openvino/tf-slim/containers/Dockerfile | FROM intelpython/intelpython3_core as BUILD
RUN apt-get update && apt-get install -y --no-install-recommends \
openjdk-8-jdk \
openjdk-8-jre-headless \
build-essential \
curl \
git \
libcurl3-dev \
libfreetype6-dev \
libhdf5-serial-dev \
libpng-dev \
libzmq3-dev \
pkg-config \
rsync \
software-properties-common \
unzip \
zip \
zlib1g-dev && \
apt-get clean
RUN git clone --depth 1 https://github.com/tensorflow/tensorflow
RUN conda create --name myenv -y
ENV PATH /opt/conda/envs/myenv/bin:$PATH
# Set up Bazel.
# Running bazel inside a `docker build` command causes trouble, cf:
# https://github.com/bazelbuild/bazel/issues/134
# The easiest solution is to set up a bazelrc file forcing --batch.
RUN echo "startup --batch" >>/etc/bazel.bazelrc
# Similarly, we need to workaround sandboxing issues:
# https://github.com/bazelbuild/bazel/issues/418
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/etc/bazel.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.19.2
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
chmod +x bazel-*.sh && \
./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
cd / && \
rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh
RUN cd tensorflow && bazel build tensorflow/tools/graph_transforms:summarize_graph
FROM intelpython/intelpython3_core as PROD
RUN apt-get update && apt-get install -y --no-install-recommends \
git && \
apt-get clean
WORKDIR /slim
RUN git clone --depth 1 https://github.com/tensorflow/models && rm -Rf models/.git && \
git clone --depth 1 https://github.com/tensorflow/tensorflow && rm -Rf tensorflow/.git
RUN conda create --name myenv -y
ENV PATH /opt/conda/envs/myenv/bin:$PATH
RUN pip install --no-cache-dir tensorflow validators google-cloud-storage
ENV PYTHONPATH=models/research/slim:tensorflow/python/tools
COPY --from=BUILD /tensorflow/bazel-bin/tensorflow/tools/graph_transforms/summarize_graph summarize_graph
COPY --from=BUILD /root/.cache/bazel/_bazel_root/*/execroot/org_tensorflow/bazel-out/k8-opt/bin/_solib_k8/_U_S_Stensorflow_Stools_Sgraph_Utransforms_Csummarize_Ugraph___Utensorflow/libtensorflow_framework.so libtensorflow_framework.so
COPY slim_model.py .
| 571 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino | kubeflow_public_repos/pipelines/components/contrib/openvino/predict/README.md | # Inference component with OpenVINO inference engine
This component takes the following parameters:
* path to the model in Intermediate Representation format ( xml and bin files)
* numpy file with the input dataset. The input shape should match the input definition of the used model.
* classification labels which can be used to calculate model accuracy
* input data can be scaled using parameters scale_div and scale_sub
* path to the folder where the inference results in numpy format should be uploaded
Inference performance details are included in the component logs.
This component is tuned for classification models but can be considered as an example for arbitrary OpenVINO models.
The component generates two metrics: inference latency and accuracy.
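The scaling is applied to the input data as `scaled = (input / scale_div) - scale_sub`.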
```bash
predict.py --help
usage: predict.py [-h] --model_bin MODEL_BIN --model_xml MODEL_XML
--input_numpy_file INPUT_NUMPY_FILE --label_numpy_file
LABEL_NUMPY_FILE --output_folder OUTPUT_FOLDER
[--batch_size BATCH_SIZE] [--scale_div SCALE_DIV]
[--scale_sub SCALE_SUB]
Component executing inference operation
optional arguments:
-h, --help show this help message and exit
--model_bin MODEL_BIN
GCS or local path to model weights file (.bin)
--model_xml MODEL_XML
GCS or local path to model graph (.xml)
--input_numpy_file INPUT_NUMPY_FILE
GCS or local path to input dataset numpy file
--label_numpy_file LABEL_NUMPY_FILE
GCS or local path to numpy file with labels
--output_folder OUTPUT_FOLDER
GCS or local path to results upload folder
--batch_size BATCH_SIZE
batch size to be used for inference
  --scale_div SCALE_DIV
                        scale the numpy input by dividing it by the value
  --scale_sub SCALE_SUB
                        scale the numpy input by subtracting the value
```
## building docker image
```bash
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy .
```
## testing the image locally
```bash
COMMAND="python3 predict.py \
--model_bin gs://<path>/model.bin \
--model_xml gs://<path>/model.xml \
--input_numpy_file gs://<path>/datasets/imgs.npy \
--label_numpy_file gs://<path>/datasets/labels.npy \
--output_folder gs://<path>/outputs"
docker run --rm -it -e GOOGLE_APPLICATION_CREDENTIALS=/etc/credentials/gcp_key.json \
-v ${PWD}/key.json:/etc/credentials/gcp_key.json <image name> $COMMAND
``` | 572 |
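## preparing example input data
The component consumes the dataset and the labels as numpy files. A minimal sketch for generating random placeholder data is shown below; the shapes are hypothetical and must be adjusted to the input definition of your model (NCHW `3x224x224` is used here as an example):
```python
import numpy as np
# random placeholder dataset: 4 images in NCHW layout, float32
imgs = np.random.rand(4, 3, 224, 224).astype(np.float32)
# random integer labels matching the number of model classes
labels = np.random.randint(0, 1000, size=(4,))
np.save("imgs.npy", imgs)
np.save("labels.npy", labels)
```
The generated files can be copied to GCS, for example with `gsutil cp imgs.npy labels.npy gs://<path>/datasets/`, before running the component.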
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/predict | kubeflow_public_repos/pipelines/components/contrib/openvino/predict/containers/requirements.txt | numpy
google-cloud-storage
| 573 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/predict | kubeflow_public_repos/pipelines/components/contrib/openvino/predict/containers/classes.py | #
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
imagenet_classes = {0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'}
| 574 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/predict | kubeflow_public_repos/pipelines/components/contrib/openvino/predict/containers/Dockerfile | FROM ubuntu:16.04 as DEV
RUN apt-get update && apt-get install -y \
curl \
ca-certificates \
python3-pip \
python-dev \
libgfortran3 \
vim \
build-essential \
cmake \
curl \
wget \
libssl-dev \
ca-certificates \
git \
libboost-regex-dev \
gcc-multilib \
g++-multilib \
libgtk2.0-dev \
pkg-config \
unzip \
automake \
libtool \
autoconf \
libpng12-dev \
libcairo2-dev \
libpango1.0-dev \
libglib2.0-dev \
libgtk2.0-dev \
libswscale-dev \
libavcodec-dev \
libavformat-dev \
libgstreamer1.0-0 \
gstreamer1.0-plugins-base \
libusb-1.0-0-dev \
libopenblas-dev
ARG DLDT_DIR=/dldt-2018_R5
RUN git clone --depth=1 -b 2018_R5 https://github.com/opencv/dldt.git ${DLDT_DIR} && \
cd ${DLDT_DIR} && git submodule init && git submodule update --recursive && \
rm -Rf .git && rm -Rf model-optimizer
WORKDIR ${DLDT_DIR}
RUN curl -L -o ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz https://github.com/intel/mkl-dnn/releases/download/v0.17.2/mklml_lnx_2019.0.1.20180928.tgz && \
tar -xzf ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz && rm ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz
WORKDIR ${DLDT_DIR}/inference-engine
RUN mkdir build && cd build && cmake -DGEMM=MKL -DMKLROOT=${DLDT_DIR}/mklml_lnx_2019.0.1.20180928 -DENABLE_MKL_DNN=ON -DCMAKE_BUILD_TYPE=Release ..
RUN cd build && make -j4
RUN pip3 install cython numpy && mkdir ie_bridges/python/build && cd ie_bridges/python/build && \
cmake -DInferenceEngine_DIR=${DLDT_DIR}/inference-engine/build -DPYTHON_EXECUTABLE=`which python3` -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.5m.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.5m .. && \
make -j4
FROM ubuntu:16.04 as PROD
RUN apt-get update && apt-get install -y --no-install-recommends \
curl \
ca-certificates \
python3-pip \
python3-dev \
virtualenv \
libgomp1
COPY --from=DEV /dldt-2018_R5/inference-engine/bin/intel64/Release/lib/*.so /usr/local/lib/
COPY --from=DEV /dldt-2018_R5/inference-engine/ie_bridges/python/bin/intel64/Release/python_api/python3.5/openvino/ /usr/local/lib/openvino/
COPY --from=DEV /dldt-2018_R5/mklml_lnx_2019.0.1.20180928/lib/lib*.so /usr/local/lib/
ENV LD_LIBRARY_PATH=/usr/local/lib
ENV PYTHONPATH=/usr/local/lib
COPY requirements.txt .
RUN pip3 install setuptools wheel
RUN pip3 install -r requirements.txt
COPY predict.py classes.py ./
| 575 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/predict | kubeflow_public_repos/pipelines/components/contrib/openvino/predict/containers/predict.py | from openvino.inference_engine import IENetwork, IEPlugin
import argparse
import numpy as np
from urllib.parse import urlparse
from google.cloud import storage
from google.auth import exceptions
import classes
import datetime
from shutil import copy
import os
import json
def get_local_file(source_path):
parsed_path = urlparse(source_path)
if parsed_path.scheme == "gs":
bucket_name = parsed_path.netloc
file_path = parsed_path.path[1:]
file_name = os.path.split(parsed_path.path)[1]
try:
gs_client = storage.Client()
bucket = gs_client.get_bucket(bucket_name)
except exceptions.DefaultCredentialsError:
# if credentials fails, try to connect as anonymous user
gs_client = storage.Client.create_anonymous_client()
bucket = gs_client.bucket(bucket_name, user_project=None)
blob = bucket.blob(file_path)
blob.download_to_filename(file_name)
elif parsed_path.scheme == "":
# in case of local path just pass the input argument
if os.path.isfile(source_path):
file_name = source_path
else:
print("file " + source_path + "is not accessible")
file_name = ""
return file_name
def upload_file(source_file, target_folder):
parsed_path = urlparse(target_folder)
if parsed_path.scheme == "gs":
bucket_name = parsed_path.netloc
folder_path = parsed_path.path[1:]
try:
gs_client = storage.Client()
bucket = gs_client.get_bucket(bucket_name)
blob = bucket.blob(folder_path + "/" + source_file)
blob.upload_from_filename(source_file)
except Exception as er:
print(er)
return False
elif parsed_path.scheme == "":
if target_folder != ".":
copy(source_file, target_folder)
return True
def main():
parser = argparse.ArgumentParser(
description='Component executing inference operation')
parser.add_argument('--model_bin', type=str, required=True,
help='GCS or local path to model weights file (.bin)')
parser.add_argument('--model_xml', type=str, required=True,
help='GCS or local path to model graph (.xml)')
parser.add_argument('--input_numpy_file', type=str, required=True,
help='GCS or local path to input dataset numpy file')
parser.add_argument('--label_numpy_file', type=str, required=True,
help='GCS or local path to numpy file with labels')
parser.add_argument('--output_folder', type=str, required=True,
help='GCS or local path to results upload folder')
parser.add_argument('--batch_size', type=int, default=1,
help='batch size to be used for inference')
parser.add_argument('--scale_div', type=float, default=1,
                        help='scale the np input by dividing it by this value')
parser.add_argument('--scale_sub', type=float, default=128,
                        help='scale the np input by subtracting this value')
args = parser.parse_args()
print(args)
device = "CPU"
plugin_dir = None
model_xml = get_local_file(args.model_xml)
print("model xml", model_xml)
if model_xml == "":
exit(1)
model_bin = get_local_file(args.model_bin)
print("model bin", model_bin)
if model_bin == "":
exit(1)
input_numpy_file = get_local_file(args.input_numpy_file)
print("input_numpy_file", input_numpy_file)
if input_numpy_file == "":
exit(1)
label_numpy_file = get_local_file(args.label_numpy_file)
print("label_numpy_file", label_numpy_file)
if label_numpy_file == "":
exit(1)
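    # Set up the inference plugin; the CPU extensions library bundled in the image adds support for extra layer types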
cpu_extension = "/usr/local/lib/libcpu_extension.so"
plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
if cpu_extension and 'CPU' in device:
plugin.add_cpu_extension(cpu_extension)
print("inference engine:", model_xml, model_bin, device)
# Read IR
print("Reading IR...")
net = IENetwork(model=model_xml, weights=model_bin)
batch_size = args.batch_size
net.batch_size = batch_size
print("Model loaded. Batch size", batch_size)
input_blob = next(iter(net.inputs))
output_blob = next(iter(net.outputs))
print(output_blob)
print("Loading IR to the plugin...")
exec_net = plugin.load(network=net, num_requests=1)
print("Loading input numpy")
imgs = np.load(input_numpy_file, mmap_mode='r', allow_pickle=False)
    imgs = (imgs / args.scale_div) - args.scale_sub
lbs = np.load(label_numpy_file, mmap_mode='r', allow_pickle=False)
print("Loaded input data", imgs.shape, imgs.dtype, "Min value:", np.min(imgs), "Max value", np.max(imgs))
combined_results = {} # dictionary storing results for all model outputs
processing_times = np.zeros((0),int)
matched_count = 0
total_executed = 0
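    # Run inference batch by batch, measuring per-request latency and counting top-1 matches against the labels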
for x in range(0, imgs.shape[0] - batch_size + 1, batch_size):
img = imgs[x:(x + batch_size)]
lb = lbs[x:(x + batch_size)]
start_time = datetime.datetime.now()
results = exec_net.infer(inputs={input_blob: img})
end_time = datetime.datetime.now()
duration = (end_time - start_time).total_seconds() * 1000
print("Inference duration:", duration, "ms")
processing_times = np.append(processing_times,np.array([int(duration)]))
output = list(results.keys())[0] # check only one output
nu = results[output]
for i in range(nu.shape[0]):
single_result = nu[[i],...]
ma = np.argmax(single_result)
total_executed += 1
if ma == lb[i]:
matched_count += 1
mark_message = "; Correct match."
else:
mark_message = "; Incorrect match. Should be {} {}".format(lb[i], classes.imagenet_classes[lb[i]] )
print("\t",i, classes.imagenet_classes[ma],ma, mark_message)
if output in combined_results:
combined_results[output] = np.append(combined_results[output],
results[output], 0)
else:
combined_results[output] = results[output]
filename = output.replace("/", "_") + ".npy"
np.save(filename, combined_results[output])
upload_file(filename, args.output_folder)
print("Inference results uploaded to", filename)
print('Classification accuracy: {:.2f}'.format(100*matched_count/total_executed))
print('Average time: {:.2f} ms; average speed: {:.2f} fps'.format(round(np.average(processing_times), 2),round(1000 * batch_size / np.average(processing_times), 2)))
accuracy = matched_count/total_executed
latency = np.average(processing_times)
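    # Export metrics to /mlpipeline-metrics.json so they appear in the Kubeflow Pipelines UI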
metrics = {'metrics': [{'name': 'accuracy-score','numberValue': accuracy,'format': "PERCENTAGE"},
{'name': 'latency','numberValue': latency,'format': "RAW"}]}
with open('/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
if __name__ == "__main__":
main()
| 576 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino | kubeflow_public_repos/pipelines/components/contrib/openvino/model_convert/README.md | # Model optimization component
This component executes the model optimization process using the OpenVINO Toolkit and generates as output a model in
the Intermediate Representation (IR) format.
The component takes the following arguments:
* model input GCS path
* model optimizer parameters
* model output GCS path
```bash
usage: convert_model.py [-h] [--input_path INPUT_PATH]
[--mo_options MO_OPTIONS] [--output_path OUTPUT_PATH]
Model converter to OpenVINO Intermediate Representation format
optional arguments:
-h, --help show this help message and exit
--input_path INPUT_PATH
GCS path of input model file or folder
--mo_options MO_OPTIONS
OpenVINO Model Optimizer options
--output_path OUTPUT_PATH
GCS path of output folder
```
## Component parameters
It takes as input a GCS path to a model from any of the frameworks supported by OpenVINO:
* TensorFlow
* Caffe
* MXNet
* Kaldi
* ONNX
The input model path can be a folder or an individual file, which will be copied to the component working directory.
The model optimizer options can include any of the parameters supported by the OpenVINO Toolkit Model Optimizer.
Refer to OpenVINO [documentation](https://software.intel.com/en-us/articles/OpenVINO-ModelOptimizer) for details.
```bash
mo.py --help
usage: mo.py [-h] [--framework {tf,caffe,mxnet,kaldi,onnx}]
[--input_model INPUT_MODEL] [--model_name MODEL_NAME]
[--output_dir OUTPUT_DIR] [--input_shape INPUT_SHAPE]
[--scale SCALE] [--reverse_input_channels]
[--log_level {CRITICAL,ERROR,WARN,WARNING,INFO,DEBUG,NOTSET}]
[--input INPUT] [--output OUTPUT] [--mean_values MEAN_VALUES]
[--scale_values SCALE_VALUES]
[--data_type {FP16,FP32,half,float}] [--disable_fusing]
[--disable_resnet_optimization]
[--finegrain_fusing FINEGRAIN_FUSING] [--disable_gfusing]
[--move_to_preprocess] [--extensions EXTENSIONS] [--batch BATCH]
[--version] [--silent]
[--freeze_placeholder_with_value FREEZE_PLACEHOLDER_WITH_VALUE]
[--generate_deprecated_IR_V2] [--input_model_is_text]
[--input_checkpoint INPUT_CHECKPOINT]
[--input_meta_graph INPUT_META_GRAPH]
[--saved_model_dir SAVED_MODEL_DIR]
[--saved_model_tags SAVED_MODEL_TAGS]
[--offload_unsupported_operations_to_tf]
[--tensorflow_subgraph_patterns TENSORFLOW_SUBGRAPH_PATTERNS]
[--tensorflow_operation_patterns TENSORFLOW_OPERATION_PATTERNS]
[--tensorflow_custom_operations_config_update TENSORFLOW_CUSTOM_OPERATIONS_CONFIG_UPDATE]
[--tensorflow_use_custom_operations_config TENSORFLOW_USE_CUSTOM_OPERATIONS_CONFIG]
[--tensorflow_object_detection_api_pipeline_config TENSORFLOW_OBJECT_DETECTION_API_PIPELINE_CONFIG]
[--tensorboard_logdir TENSORBOARD_LOGDIR]
[--tensorflow_custom_layer_libraries TENSORFLOW_CUSTOM_LAYER_LIBRARIES]
[--disable_nhwc_to_nchw] [--input_proto INPUT_PROTO] [-k K]
[--mean_file MEAN_FILE] [--mean_file_offsets MEAN_FILE_OFFSETS]
[--disable_omitting_optional] [--enable_flattening_nested_params]
[--input_symbol INPUT_SYMBOL] [--nd_prefix_name ND_PREFIX_NAME]
[--pretrained_model_name PRETRAINED_MODEL_NAME]
[--save_params_from_nd] [--legacy_mxnet_model] [--counts COUNTS]
[--remove_output_softmax]
optional arguments:
-h, --help show this help message and exit
--framework {tf,caffe,mxnet,kaldi,onnx}
Name of the framework used to train the input model.
Framework-agnostic parameters:
--input_model INPUT_MODEL, -w INPUT_MODEL, -m INPUT_MODEL
Tensorflow*: a file with a pre-trained model (binary
or text .pb file after freezing). Caffe*: a model
proto file with model weights
--model_name MODEL_NAME, -n MODEL_NAME
Model_name parameter passed to the final create_ir
transform. This parameter is used to name a network in
a generated IR and output .xml/.bin files.
--output_dir OUTPUT_DIR, -o OUTPUT_DIR
Directory that stores the generated IR. By default, it
is the directory from where the Model Optimizer is
launched.
--input_shape INPUT_SHAPE
Input shape(s) that should be fed to an input node(s)
of the model. Shape is defined as a comma-separated
list of integer numbers enclosed in parentheses or
square brackets, for example [1,3,227,227] or
(1,227,227,3), where the order of dimensions depends
on the framework input layout of the model. For
example, [N,C,H,W] is used for Caffe* models and
[N,H,W,C] for TensorFlow* models. Model Optimizer
performs necessary transformations to convert the
shape to the layout required by Inference Engine
(N,C,H,W). The shape should not contain undefined
dimensions (? or -1) and should fit the dimensions
defined in the input operation of the graph. If there
are multiple inputs in the model, --input_shape should
contain definition of shape for each input separated
by a comma, for example: [1,3,227,227],[2,4] for a
model with two inputs with 4D and 2D shapes.
--scale SCALE, -s SCALE
All input values coming from original network inputs
will be divided by this value. When a list of inputs
is overridden by the --input parameter, this scale is
not applied for any input that does not match with the
original input of the model.
--reverse_input_channels
Switch the input channels order from RGB to BGR (or
vice versa). Applied to original inputs of the model
if and only if a number of channels equals 3. Applied
after application of --mean_values and --scale_values
options, so numbers in --mean_values and
--scale_values go in the order of channels used in the
original model.
--log_level {CRITICAL,ERROR,WARN,WARNING,INFO,DEBUG,NOTSET}
Logger level
--input INPUT The name of the input operation of the given model.
Usually this is a name of the input placeholder of the
model.
--output OUTPUT The name of the output operation of the model. For
TensorFlow*, do not add :0 to this name.
--mean_values MEAN_VALUES, -ms MEAN_VALUES
Mean values to be used for the input image per
channel. Values to be provided in the (R,G,B) or
[R,G,B] format. Can be defined for desired input of
the model, for example: "--mean_values
data[255,255,255],info[255,255,255]". The exact
meaning and order of channels depend on how the
original model was trained.
--scale_values SCALE_VALUES
Scale values to be used for the input image per
channel. Values are provided in the (R,G,B) or [R,G,B]
format. Can be defined for desired input of the model,
for example: "--scale_values
data[255,255,255],info[255,255,255]". The exact
meaning and order of channels depend on how the
original model was trained.
--data_type {FP16,FP32,half,float}
Data type for all intermediate tensors and weights. If
original model is in FP32 and --data_type=FP16 is
specified, all model weights and biases are quantized
to FP16.
--disable_fusing Turn off fusing of linear operations to Convolution
--disable_resnet_optimization
Turn off resnet optimization
--finegrain_fusing FINEGRAIN_FUSING
Regex for layers/operations that won't be fused.
Example: --finegrain_fusing Convolution1,.*Scale.*
--disable_gfusing Turn off fusing of grouped convolutions
--move_to_preprocess Move mean values to IR preprocess section
--extensions EXTENSIONS
Directory or a comma separated list of directories
with extensions. To disable all extensions including
those that are placed at the default location, pass an
empty string.
--batch BATCH, -b BATCH
Input batch size
--version Version of Model Optimizer
--silent Prevent any output messages except those that
correspond to log level equals ERROR, that can be set
with the following option: --log_level. By default,
log level is already ERROR.
--freeze_placeholder_with_value FREEZE_PLACEHOLDER_WITH_VALUE
Replaces input layer with constant node with provided
value, e.g.: "node_name->True"
--generate_deprecated_IR_V2
Force to generate legacy/deprecated IR V2 to work with
previous versions of the Inference Engine. The
resulting IR may or may not be correctly loaded by
Inference Engine API (including the most recent and
old versions of Inference Engine) and provided as a
partially-validated backup option for specific
deployment scenarios. Use it at your own discretion.
By default, without this option, the Model Optimizer
generates IR V3.
TensorFlow*-specific parameters:
--input_model_is_text
TensorFlow*: treat the input model file as a text
protobuf format. If not specified, the Model Optimizer
treats it as a binary file by default.
--input_checkpoint INPUT_CHECKPOINT
TensorFlow*: variables file to load.
--input_meta_graph INPUT_META_GRAPH
Tensorflow*: a file with a meta-graph of the model
before freezing
--saved_model_dir SAVED_MODEL_DIR
TensorFlow*: directory representing non frozen model
--saved_model_tags SAVED_MODEL_TAGS
Group of tag(s) of the MetaGraphDef to load, in string
format, separated by ','. For tag-set contains
multiple tags, all tags must be passed in.
--offload_unsupported_operations_to_tf
TensorFlow*: automatically offload unsupported
operations to TensorFlow*
--tensorflow_subgraph_patterns TENSORFLOW_SUBGRAPH_PATTERNS
TensorFlow*: a list of comma separated patterns that
will be applied to TensorFlow* node names to infer a
part of the graph using TensorFlow*.
--tensorflow_operation_patterns TENSORFLOW_OPERATION_PATTERNS
TensorFlow*: a list of comma separated patterns that
will be applied to TensorFlow* node type (ops) to
infer these operations using TensorFlow*.
--tensorflow_custom_operations_config_update TENSORFLOW_CUSTOM_OPERATIONS_CONFIG_UPDATE
TensorFlow*: update the configuration file with node
name patterns with input/output nodes information.
--tensorflow_use_custom_operations_config TENSORFLOW_USE_CUSTOM_OPERATIONS_CONFIG
TensorFlow*: use the configuration file with custom
operation description.
--tensorflow_object_detection_api_pipeline_config TENSORFLOW_OBJECT_DETECTION_API_PIPELINE_CONFIG
TensorFlow*: path to the pipeline configuration file
used to generate model created with help of Object
Detection API.
--tensorboard_logdir TENSORBOARD_LOGDIR
TensorFlow*: dump the input graph to a given directory
that should be used with TensorBoard.
--tensorflow_custom_layer_libraries TENSORFLOW_CUSTOM_LAYER_LIBRARIES
TensorFlow*: comma separated list of shared libraries
with TensorFlow* custom operations implementation.
--disable_nhwc_to_nchw
Disables default translation from NHWC to NCHW
Caffe*-specific parameters:
--input_proto INPUT_PROTO, -d INPUT_PROTO
Deploy-ready prototxt file that contains a topology
structure and layer attributes
-k K Path to CustomLayersMapping.xml to register custom
layers
--mean_file MEAN_FILE, -mf MEAN_FILE
Mean image to be used for the input. Should be a
binaryproto file
--mean_file_offsets MEAN_FILE_OFFSETS, -mo MEAN_FILE_OFFSETS
Mean image offsets to be used for the input
binaryproto file. When the mean image is bigger than
the expected input, it is cropped. By default, centers
of the input image and the mean image are the same and
the mean image is cropped by dimensions of the input
image. The format to pass this option is the
following: "-mo (x,y)". In this case, the mean file is
cropped by dimensions of the input image with offset
(x,y) from the upper left corner of the mean image
--disable_omitting_optional
Disable omitting optional attributes to be used for
custom layers. Use this option if you want to transfer
all attributes of a custom layer to IR. Default
behavior is to transfer the attributes with default
values and the attributes defined by the user to IR.
--enable_flattening_nested_params
Enable flattening optional params to be used for
custom layers. Use this option if you want to transfer
attributes of a custom layer to IR with flattened
nested parameters. Default behavior is to transfer the
attributes without flattening nested parameters.
Mxnet-specific parameters:
--input_symbol INPUT_SYMBOL
Symbol file (for example, model-symbol.json) that
contains a topology structure and layer attributes
--nd_prefix_name ND_PREFIX_NAME
Prefix name for args.nd and argx.nd files.
--pretrained_model_name PRETRAINED_MODEL_NAME
Name of a pretrained MXNet model without extension and
epoch number. This model will be merged with args.nd
and argx.nd files
--save_params_from_nd
Enable saving built parameters file from .nd files
--legacy_mxnet_model Enable MXNet loader to make a model compatible with
the latest MXNet version. Use only if your model was
trained with MXNet version lower than 1.0.0
Kaldi-specific parameters:
--counts COUNTS Path to the counts file
--remove_output_softmax
Removes the SoftMax layer that is the output layer
```
The output folder specifies where the generated model files in IR format (with .bin and .xml
extensions) should be uploaded.
The component also creates 3 files containing the paths to the generated model:
- `/tmp/output_path.txt` - GCS path to the folder containing the generated model files
- `/tmp/bin_path.txt` - GCS path to the model weights file (.bin)
- `/tmp/xml_path.txt` - GCS path to the model graph file (.xml)
They can be used as parameters passed to other jobs in ML pipelines.
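Below is a minimal, illustrative sketch of wiring this component into a Kubeflow Pipelines definition using the
SDK v1 `ContainerOp` API; the image name is a placeholder for the image built from this folder, the pipeline and
step names are arbitrary, and the parameter values reuse the examples from the next section:
```python
import kfp.dsl as dsl
@dsl.pipeline(name='openvino-model-conversion',
              description='Convert a model to OpenVINO IR format')
def convert_pipeline(
        input_path='gs://tensorflow_model_path/resnet/1/saved_model.pb',
        mo_options='--saved_model_dir .',
        output_path='gs://tensorflow_model_path/resnet/dldt/1'):
    # The component writes the generated GCS paths to files under /tmp;
    # file_outputs exposes them as step outputs for downstream tasks.
    convert = dsl.ContainerOp(
        name='openvino-model-convert',
        image='<image_name>',  # placeholder: image built from this folder
        command=['convert_model.py'],
        arguments=[
            '--input_path', input_path,
            '--mo_options', mo_options,
            '--output_path', output_path,
        ],
        file_outputs={
            'output_path': '/tmp/output_path.txt',
            'bin_path': '/tmp/bin_path.txt',
            'xml_path': '/tmp/xml_path.txt',
        })
    # Downstream steps can reference e.g. convert.outputs['xml_path'].
```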
## Examples
Input path - gs://tensorflow_model_path/resnet/1/saved_model.pb<br />
MO options - --saved_model_dir .<br />
Output path - gs://tensorflow_model_path/resnet/1
Input path - gs://tensorflow_model_path/resnet/1<br />
MO options - --saved_model_dir 1<br />
Output path - gs://tensorflow_model_path/resnet/dldt/1<br />
## Building docker image
```bash
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy .
```
## Starting and testing the component locally
This component requires a GCP authentication token in JSON format generated for a service account
that has access to the GCS location. In the example below it is stored as key.json in the current path.
```bash
COMMAND="convert_model.py --mo_options \"--saved_model_dir .\" --input_path gs://tensorflow_model_path/resnet/1/saved_model.pb --output_path gs://tensorflow_model_path/resnet/1"
docker run --rm -it -v $(pwd)/key.json:/etc/credentials/gcp-key.json \
-e GOOGLE_APPLICATION_CREDENTIALS=/etc/credentials/gcp-key.json <image_name> $COMMAND
```
| 577 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/model_convert | kubeflow_public_repos/pipelines/components/contrib/openvino/model_convert/containers/Dockerfile | FROM ubuntu:16.04
RUN apt-get update && apt-get install -y --no-install-recommends \
curl ca-certificates \
python3-pip \
python-dev \
gcc \
python-setuptools \
python3-setuptools \
libgfortran3 \
unzip \
vim && \
apt-get clean
RUN curl -L -o 2018_R5.tar.gz https://github.com/opencv/dldt/archive/2018_R5.tar.gz && \
tar -zxf 2018_R5.tar.gz && \
rm 2018_R5.tar.gz && \
rm -Rf dldt-2018_R5/inference-engine
WORKDIR dldt-2018_R5/model-optimizer
RUN pip3 install --upgrade pip setuptools
RUN pip3 install -r requirements.txt
RUN curl -L -o google-cloud-sdk.zip https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip && \
unzip -qq google-cloud-sdk.zip -d tools && \
rm google-cloud-sdk.zip && \
tools/google-cloud-sdk/install.sh --usage-reporting=false \
--path-update=true --bash-completion=false \
--disable-installation-options && \
tools/google-cloud-sdk/bin/gcloud -q components update \
gcloud core gsutil && \
tools/google-cloud-sdk/bin/gcloud config set component_manager/disable_update_check true && \
touch tools/google-cloud-sdk/lib/third_party/google.py && \
pip install -U crcmod
ENV PATH ${PATH}:/dldt-2018_R5/model-optimizer:/dldt-2018_R5/model-optimizer/tools/google-cloud-sdk/bin
COPY convert_model.py .
RUN chmod 755 *.py
WORKDIR input
| 578 |
0 | kubeflow_public_repos/pipelines/components/contrib/openvino/model_convert | kubeflow_public_repos/pipelines/components/contrib/openvino/model_convert/containers/convert_model.py | #!/usr/bin/python3
import argparse
import subprocess
import re
import os
def is_insecure_path(path):
    # check that the path does not include insecure characters
if not re.match(r"^gs:\/\/[\.\w\/-]*$", path):
is_insecure = True
else:
is_insecure = False
return is_insecure
def are_insecure_mo_options(all_options):
    # check that none of the passed options include insecure characters
is_insecure = False
for option in all_options.split():
if not re.match(r"^[\.\w:\/-]*$", option):
is_insecure = True
return is_insecure
def main():
parser = argparse.ArgumentParser(
description='Model converter to OpenVINO IR format')
parser.add_argument(
'--input_path', type=str, help='GCS path of input model file or folder')
parser.add_argument(
'--mo_options', type=str, help='OpenVINO Model Optimizer options')
parser.add_argument(
'--output_path', type=str, help='GCS path of output folder')
args = parser.parse_args()
# Validate parameters
if is_insecure_path(args.input_path):
print("Invalid input format")
exit(1)
if is_insecure_path(args.output_path):
print("Invalid output format")
exit(1)
if are_insecure_mo_options(args.mo_options):
print("Invalid model optimizer options")
exit(1)
# Initialize gsutil creds if needed
if "GOOGLE_APPLICATION_CREDENTIALS" in os.environ:
command = "gcloud auth activate-service-account " \
"--key-file=${GOOGLE_APPLICATION_CREDENTIALS}"
print("auth command", command)
return_code = subprocess.call(command, shell=True)
print("return code", return_code)
# Downloading input model or GCS folder with a model to current folder
command = "gsutil cp -r " + args.input_path + " ."
print("gsutil download command", command)
return_code = subprocess.call(command, shell=True)
print("return code", return_code)
if return_code:
exit(1)
# Executing model optimization
command = "mo.py " + args.mo_options
print("Starting model optimization:", command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE,
universal_newlines=True)
print("Model optimization output",output.stdout)
XML = ""
BIN = ""
for line in output.stdout.splitlines():
if "[ SUCCESS ] XML file" in line:
XML = line.split(":")[1].strip()
if "[ SUCCESS ] BIN file" in line:
BIN = line.split(":")[1].strip()
if XML == "" or BIN == "":
print("Error, model optimization failed")
exit(1)
command = "gsutil cp " + XML + " " + os.path.join(args.output_path, os.path.split(XML)[1])
print("gsutil upload command", command)
return_code = subprocess.call(command, shell=True)
print("return code", return_code)
command = "gsutil cp " + BIN + " " + os.path.join(args.output_path, os.path.split(BIN)[1])
print("gsutil upload command", command)
return_code = subprocess.call(command, shell=True)
print("return code", return_code)
if return_code:
exit(1)
with open('/tmp/output_path.txt', 'w') as f:
f.write(args.output_path)
with open('/tmp/bin_path.txt', 'w') as f:
f.write(os.path.join(args.output_path, os.path.split(BIN)[1]))
with open('/tmp/xml_path.txt', 'w') as f:
f.write(os.path.join(args.output_path, os.path.split(XML)[1]))
print("Model successfully generated and uploaded to ", args.output_path)
if __name__ == "__main__":
main()
| 579 |
0 | kubeflow_public_repos/pipelines/components/contrib/filesystem | kubeflow_public_repos/pipelines/components/contrib/filesystem/list_items/component.yaml | name: List items
description: Recursively list directory contents.
inputs:
- {name: Directory, type: Directory}
outputs:
- {name: Items}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/filesystem/list_items/component.yaml'
implementation:
container:
image: alpine
command:
- sh
- -ex
- -c
- |
mkdir -p "$(dirname "$1")"
#ls --almost-all --recursive "$0" > "$1"
ls -A -R "$0" > "$1"
- inputPath: Directory
- outputPath: Items
| 580 |
0 | kubeflow_public_repos/pipelines/components/contrib/filesystem | kubeflow_public_repos/pipelines/components/contrib/filesystem/get_file/component.yaml | name: Get file
description: Get file from directory.
inputs:
- {name: Directory, type: Directory}
- {name: Subpath, type: String}
outputs:
- {name: File}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/filesystem/get_file/component.yaml'
implementation:
container:
image: alpine
command:
- sh
- -ex
- -c
- |
mkdir -p "$(dirname "$2")"
cp -r "$0/$1" "$2"
- inputPath: Directory
- inputValue: Subpath
- outputPath: File
| 581 |
0 | kubeflow_public_repos/pipelines/components/contrib/filesystem | kubeflow_public_repos/pipelines/components/contrib/filesystem/get_subdirectory/component.yaml | name: Get subdirectory
description: Get subdirectory from directory.
inputs:
- {name: Directory, type: Directory}
- {name: Subpath, type: String}
outputs:
- {name: Subdir, type: Directory}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/filesystem/get_subdirectory/component.yaml'
implementation:
container:
image: alpine
command:
- sh
- -ex
- -c
- |
mkdir -p "$(dirname "$2")"
cp -r "$0/$1" "$2"
- inputPath: Directory
- inputValue: Subpath
- outputPath: Subdir
| 582 |
0 | kubeflow_public_repos/pipelines/components/contrib/kubernetes | kubeflow_public_repos/pipelines/components/contrib/kubernetes/Apply_object/component.yaml | name: Apply Kubernetes object
inputs:
- {name: Object, type: JsonObject}
outputs:
- {name: Name, type: String}
- {name: Kind, type: String}
- {name: Object, type: JsonObject}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/kubernetes/Apply_object/component.yaml'
implementation:
container:
image: bitnami/kubectl:1.17.17
command:
- bash
- -exc
- |
object_path=$0
output_name_path=$1
output_kind_path=$2
output_object_path=$3
mkdir -p "$(dirname "$output_name_path")"
mkdir -p "$(dirname "$output_kind_path")"
mkdir -p "$(dirname "$output_object_path")"
kubectl apply -f "$object_path" --output=json > "$output_object_path"
< "$output_object_path" jq '.metadata.name' --raw-output > "$output_name_path"
< "$output_object_path" jq '.kind' --raw-output > "$output_kind_path"
- {inputPath: Object}
- {outputPath: Name}
- {outputPath: Kind}
- {outputPath: Object}
| 583 |
0 | kubeflow_public_repos/pipelines/components/contrib/kubernetes | kubeflow_public_repos/pipelines/components/contrib/kubernetes/Create_object/component.yaml | name: Create Kubernetes object
inputs:
- {name: Object, type: JsonObject}
outputs:
- {name: Name, type: String}
- {name: Kind, type: String}
- {name: Object, type: JsonObject}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/kubernetes/Create_object/component.yaml'
implementation:
container:
image: bitnami/kubectl:1.17.17
command:
- bash
- -exc
- |
object_path=$0
output_name_path=$1
output_kind_path=$2
output_object_path=$3
mkdir -p "$(dirname "$output_name_path")"
mkdir -p "$(dirname "$output_kind_path")"
mkdir -p "$(dirname "$output_object_path")"
kubectl create -f "$object_path" --output=json > "$output_object_path"
< "$output_object_path" jq '.metadata.name' --raw-output > "$output_name_path"
< "$output_object_path" jq '.kind' --raw-output > "$output_kind_path"
- {inputPath: Object}
- {outputPath: Name}
- {outputPath: Kind}
- {outputPath: Object}
| 584 |
0 | kubeflow_public_repos/pipelines/components/contrib/kubernetes | kubeflow_public_repos/pipelines/components/contrib/kubernetes/Delete_object/component.yaml | name: Delete Kubernetes object
inputs:
- {name: Name, type: String}
- {name: Kind, type: String}
outputs:
- {name: Name, type: String}
- {name: Kind, type: String}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/kubernetes/Delete_object/component.yaml'
implementation:
container:
image: bitnami/kubectl:1.17.17
command:
- bash
- -exc
- |
object_name=$0
object_type=$1
output_name_path=$2
output_kind_path=$3
mkdir -p "$(dirname "$output_name_path")"
mkdir -p "$(dirname "$output_kind_path")"
typed_object_name=$(kubectl delete "$object_type" "$object_name" --output=name)
echo "${typed_object_name##*/}" >"$output_name_path"
echo "${typed_object_name%/*}" >"$output_kind_path"
- {inputValue: Name}
- {inputValue: Kind}
- {outputPath: Name}
- {outputPath: Kind}
| 585 |
0 | kubeflow_public_repos/pipelines/components/contrib/kubernetes | kubeflow_public_repos/pipelines/components/contrib/kubernetes/Get_object/component.yaml | name: Get Kubernetes object
inputs:
- {name: Name, type: String}
- {name: Kind, type: String}
outputs:
- {name: Object, type: JsonObject}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/kubernetes/Get_object/component.yaml'
implementation:
container:
image: bitnami/kubectl:1.17.17
command:
- bash
- -exc
- |
object_name=$0
object_type=$1
output_object_path=$2
mkdir -p "$(dirname "$output_object_path")"
kubectl get "$object_type" "$object_name" --output=json >"$output_object_path"
- {inputValue: Name}
- {inputValue: Kind}
- {outputPath: Object}
| 586 |
0 | kubeflow_public_repos/pipelines/components/contrib/kubernetes | kubeflow_public_repos/pipelines/components/contrib/kubernetes/Create_PersistentVolumeClaim/component.yaml | name: Create PersistentVolumeClaim in Kubernetes
inputs:
- {name: Name, type: String}
- {name: Storage size, type: String, default: 1Gi}
- {name: Namespace, type: String, default: default}
outputs:
- {name: Name, type: String}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/kubernetes/Create_PersistentVolumeClaim/component.yaml'
implementation:
container:
image: bitnami/kubectl:1.17.17
command:
- bash
- -exc
- |
name=$0
storage_size=$1
namespace=$2
output_name_path=$3
mkdir -p "$(dirname "$output_name_path")"
object_path=$(mktemp)
cat <<EOF >"$object_path"
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: $name
spec:
#storageClassName: standard
accessModes:
- ReadWriteOnce
resources:
requests:
storage: $storage_size
EOF
object_name=$(kubectl apply -f "$object_path" --namespace $namespace --output=name)
object_name=${object_name##persistentvolumeclaim/}
echo "$object_name" >"$output_name_path"
- {inputValue: Name}
- {inputValue: Storage size}
- {inputValue: Namespace}
- {outputPath: Name}
| 587 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowLiteModel | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowLiteModel/from_TensorflowSavedModel/component.yaml | name: Convert Tensorflow SavedModel to Tensorflow Lite model
inputs:
- {name: Model, type: TensorflowSavedModel}
outputs:
- {name: Model, type: TensorflowLiteModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/TensorflowLiteModel/from_TensorflowSavedModel/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -exc
- |
model_path="$0"
output_model_path="$1"
mkdir -p "$(dirname "$output_model_path")"
tflite_convert --saved_model_dir "$model_path" --output_file "$output_model_path"
- {inputPath: Model}
- {outputPath: Model}
| 588 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowLiteModel | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowLiteModel/from_KerasModelHdf5/component.yaml | name: Convert Keras HDF5 model to Tensorflow Lite model
inputs:
- {name: Model, type: KerasModelHdf5}
outputs:
- {name: Model, type: TensorflowLiteModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/TensorflowLiteModel/from_KerasModelHdf5/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -exc
- |
model_path="$0"
output_model_path="$1"
mkdir -p "$(dirname "$output_model_path")"
tflite_convert --keras_model_file "$model_path" --output_file "$output_model_path"
- {inputPath: Model}
- {outputPath: Model}
| 589 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/KerasModelHdf5 | kubeflow_public_repos/pipelines/components/contrib/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.py | from kfp.components import create_component_from_func, InputPath, OutputPath
def keras_convert_hdf5_model_to_tf_saved_model(
model_path: InputPath('KerasModelHdf5'),
converted_model_path: OutputPath('TensorflowSavedModel'),
):
'''Converts Keras HDF5 model to Tensorflow SavedModel format.
Args:
model_path: Keras model in HDF5 format.
converted_model_path: Keras model in Tensorflow SavedModel format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
from tensorflow import keras
model = keras.models.load_model(filepath=model_path)
keras.models.save_model(model=model, filepath=converted_model_path, save_format='tf')
if __name__ == '__main__':
keras_convert_hdf5_model_to_tf_saved_model_op = create_component_from_func(
keras_convert_hdf5_model_to_tf_saved_model,
base_image='tensorflow/tensorflow:2.3.0',
packages_to_install=['h5py==2.10.0'],
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml",
},
)
| 590 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/KerasModelHdf5 | kubeflow_public_repos/pipelines/components/contrib/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml | name: Keras convert hdf5 model to tf saved model
description: Converts Keras HDF5 model to Tensorflow SavedModel format.
inputs:
- {name: model, type: KerasModelHdf5, description: Keras model in HDF5 format.}
outputs:
- {name: converted_model, type: TensorflowSavedModel, description: Keras model in Tensorflow SavedModel format.}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'h5py==2.10.0' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet
--no-warn-script-location 'h5py==2.10.0' --user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def keras_convert_hdf5_model_to_tf_saved_model(
model_path,
converted_model_path,
):
'''Converts Keras HDF5 model to Tensorflow SavedModel format.
Args:
model_path: Keras model in HDF5 format.
converted_model_path: Keras model in Tensorflow SavedModel format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
from tensorflow import keras
model = keras.models.load_model(filepath=model_path)
keras.models.save_model(model=model, filepath=converted_model_path, save_format='tf')
import argparse
_parser = argparse.ArgumentParser(prog='Keras convert hdf5 model to tf saved model', description='Converts Keras HDF5 model to Tensorflow SavedModel format.')
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--converted-model", dest="converted_model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = keras_convert_hdf5_model_to_tf_saved_model(**_parsed_args)
args:
- --model
- {inputPath: model}
- --converted-model
- {outputPath: converted_model}
| 591 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowJSLayersModel | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowJSLayersModel/from_TensorflowSavedModel/component.yaml | name: Convert Keras SavedModel to Tensorflow JS LayersModel
inputs:
- {name: Model, type: TensorflowSavedModel}
outputs:
- {name: Model, type: TensorflowJSLayersModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/TensorflowJSLayersModel/from_TensorflowSavedModel/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -exc
- |
# Manually installing prerequisites so that tensorflowjs does not re-install tensorflow-cpu on top of tensorflow. See https://github.com/tensorflow/tfjs/issues/3953
python3 -m pip install --quiet 'h5py>=2.8.0' 'numpy>=1.16.4,<1.19.0' 'six>=1.12.0' 'tensorflow-hub==0.7.0' 'PyInquirer==1.0.3'
python3 -m pip install --quiet tensorflowjs==2.4.0 --no-dependencies
"$0" "$*"
- tensorflowjs_converter
- --input_format=keras_saved_model
- --output_format=tfjs_layers_model
- inputPath: Model
- outputPath: Model
| 592 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowJSLayersModel | kubeflow_public_repos/pipelines/components/contrib/_converters/TensorflowJSLayersModel/from_KerasModelHdf5/component.yaml | name: Convert Keras HDF5 model to Tensorflow JS LayersModel
inputs:
- {name: Model, type: KerasModelHdf5}
outputs:
- {name: Model, type: TensorflowJSLayersModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/TensorflowJSLayersModel/from_KerasModelHdf5/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -exc
- |
# Manually installing prerequisites so that tensorflowjs does not re-install tensorflow-cpu on top of tensorflow. See https://github.com/tensorflow/tfjs/issues/3953
python3 -m pip install --quiet 'h5py>=2.8.0' 'numpy>=1.16.4,<1.19.0' 'six>=1.12.0' 'tensorflow-hub==0.7.0' 'PyInquirer==1.0.3'
python3 -m pip install --quiet tensorflowjs==2.4.0 --no-dependencies
"$0" "$*"
- tensorflowjs_converter
- --input_format=keras
- --output_format=tfjs_layers_model
- inputPath: Model
- outputPath: Model
| 593 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/OnnxModel | kubeflow_public_repos/pipelines/components/contrib/_converters/OnnxModel/from_TensorflowSavedModel/component.yaml | name: To ONNX from Tensorflow SavedModel
inputs:
- {name: Model, type: TensorflowSavedModel}
outputs:
- {name: Model, type: OnnxModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/OnnxModel/from_TensorflowSavedModel/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -exc
- python3 -m pip install tf2onnx==1.6.3 && "$0" "$@"
- python3
- -m
- tf2onnx.convert
- --saved-model
- {inputPath: Model}
- --output
- {outputPath: Model}
- --fold_const
- --verbose
| 594 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/OnnxModel | kubeflow_public_repos/pipelines/components/contrib/_converters/OnnxModel/from_KerasModelHdf5/component.yaml | name: To ONNX from Keras HDF5 model
inputs:
- {name: Model, type: KerasModelHdf5}
outputs:
- {name: Model, type: OnnxModel}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/OnnxModel/from_KerasModelHdf5/component.yaml'
implementation:
container:
image: tensorflow/tensorflow:2.3.0
command:
- sh
- -exc
- python3 -m pip install tf2onnx==1.6.3 && "$0" "$@"
- python3
- -m
- tf2onnx.convert
- --keras
- {inputPath: Model}
- --output
- {outputPath: Model}
- --fold_const
- --verbose
| 595 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/OnnxModel | kubeflow_public_repos/pipelines/components/contrib/_converters/OnnxModel/to_TensorflowSavedModel/component.py | from kfp.components import create_component_from_func, InputPath, OutputPath
def convert_to_tensorflow_saved_model_from_onnx_model(
model_path: InputPath('OnnxModel'),
converted_model_path: OutputPath('TensorflowSavedModel'),
):
import onnx
import onnx_tf
onnx_model = onnx.load(model_path)
tf_rep = onnx_tf.backend.prepare(onnx_model)
tf_rep.export_graph(converted_model_path)
if __name__ == '__main__':
convert_to_tensorflow_saved_model_from_onnx_model_op = create_component_from_func(
convert_to_tensorflow_saved_model_from_onnx_model,
output_component_file='component.yaml',
base_image='tensorflow/tensorflow:2.4.1',
packages_to_install=['onnx-tf==1.7.0', 'onnx==1.8.0'], # onnx-tf==1.7.0 is not compatible with onnx==1.8.1
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/OnnxModel/to_TensorflowSavedModel/component.yaml",
},
)
| 596 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/OnnxModel | kubeflow_public_repos/pipelines/components/contrib/_converters/OnnxModel/to_TensorflowSavedModel/component.yaml | name: Convert to tensorflow saved model from onnx model
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/OnnxModel/to_TensorflowSavedModel/component.yaml'
inputs:
- {name: model, type: OnnxModel}
outputs:
- {name: converted_model, type: TensorflowModel}
implementation:
container:
image: tensorflow/tensorflow:2.4.1
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'onnx-tf==1.7.0' 'onnx==1.8.0' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m
pip install --quiet --no-warn-script-location 'onnx-tf==1.7.0' 'onnx==1.8.0'
--user) && "$0" "$@"
- sh
- -ec
- |
program_path=$(mktemp)
printf "%s" "$0" > "$program_path"
python3 -u "$program_path" "$@"
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_to_tensorflow_saved_model_from_onnx_model(
model_path,
converted_model_path,
):
import onnx
import onnx_tf
onnx_model = onnx.load(model_path)
tf_rep = onnx_tf.backend.prepare(onnx_model)
tf_rep.export_graph(converted_model_path)
import argparse
_parser = argparse.ArgumentParser(prog='Convert to tensorflow saved model from onnx model', description='')
_parser.add_argument("--model", dest="model_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--converted-model", dest="converted_model_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = convert_to_tensorflow_saved_model_from_onnx_model(**_parsed_args)
args:
- --model
- {inputPath: model}
- --converted-model
- {outputPath: converted_model}
| 597 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/to_CSV/component.py | from kfp.components import InputPath, OutputPath, create_component_from_func
def convert_apache_parquet_to_csv(
data_path: InputPath('ApacheParquet'),
output_data_path: OutputPath('CSV'),
):
'''Converts Apache Parquet to CSV.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import parquet
data_frame = parquet.read_pandas(data_path).to_pandas()
data_frame.to_csv(
output_data_path,
index=False,
)
if __name__ == '__main__':
convert_apache_parquet_to_csv_op = create_component_from_func(
convert_apache_parquet_to_csv,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['pyarrow==0.17.1', 'pandas==1.0.3'],
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/to_CSV/component.yaml",
},
)
| 598 |
0 | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet | kubeflow_public_repos/pipelines/components/contrib/_converters/ApacheParquet/to_CSV/component.yaml | name: Convert apache parquet to csv
description: |-
Converts Apache Parquet to CSV.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
inputs:
- {name: data, type: ApacheParquet}
outputs:
- {name: output_data, type: CSV}
metadata:
annotations:
author: Alexey Volkov <[email protected]>
canonical_location: 'https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/ApacheParquet/to_CSV/component.yaml'
implementation:
container:
image: python:3.7
command:
- sh
- -c
- (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location
'pyarrow==0.17.1' 'pandas==1.0.3' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3
-m pip install --quiet --no-warn-script-location 'pyarrow==0.17.1' 'pandas==1.0.3'
--user) && "$0" "$@"
- python3
- -u
- -c
- |
def _make_parent_dirs_and_return_path(file_path: str):
import os
os.makedirs(os.path.dirname(file_path), exist_ok=True)
return file_path
def convert_apache_parquet_to_csv(
data_path,
output_data_path,
):
'''Converts Apache Parquet to CSV.
[Apache Parquet](https://parquet.apache.org/)
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pyarrow import parquet
data_frame = parquet.read_pandas(data_path).to_pandas()
data_frame.to_csv(
output_data_path,
index=False,
)
import argparse
_parser = argparse.ArgumentParser(prog='Convert apache parquet to csv', description='Converts Apache Parquet to CSV.\n\n [Apache Parquet](https://parquet.apache.org/)\n\n Annotations:\n author: Alexey Volkov <[email protected]>')
_parser.add_argument("--data", dest="data_path", type=str, required=True, default=argparse.SUPPRESS)
_parser.add_argument("--output-data", dest="output_data_path", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)
_parsed_args = vars(_parser.parse_args())
_outputs = convert_apache_parquet_to_csv(**_parsed_args)
args:
- --data
- {inputPath: data}
- --output-data
- {outputPath: output_data}
| 599 |