index (int64, 0-0) | repo_id (stringlengths 21-232) | file_path (stringlengths 34-259) | content (stringlengths 1-14.1M) | __index_level_0__ (int64, 0-10k) |
---|---|---|---|---|
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/ml_engine/test__delete_version.py | # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from googleapiclient import errors
from kfp_component.google.ml_engine import delete_version
DELETE_VERSION_MODULE = 'kfp_component.google.ml_engine._delete_version'
@mock.patch(DELETE_VERSION_MODULE + '.gcp_common.dump_file')
@mock.patch(DELETE_VERSION_MODULE + '.KfpExecutionContext')
@mock.patch(DELETE_VERSION_MODULE + '.MLEngineClient')
class TestDeleteVersion(unittest.TestCase):
def test_execute_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json):
mock_mlengine_client().get_version.return_value = {
'state': 'READY',
}
mock_mlengine_client().delete_version.return_value = {
'name': 'mock_operation_name'
}
mock_mlengine_client().get_operation.return_value = {
'done': True
}
delete_version('projects/mock_project/models/mock_model/versions/mock_version',
wait_interval = 30)
mock_mlengine_client().delete_version.assert_called_once()
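# Retry path for the test below: get_version first reports the version as still
# DELETING, then returns None, so delete_version is expected to poll get_version twice.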
def test_execute_retry_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json):
pending_version = {
'state': 'DELETING',
}
mock_mlengine_client().get_version.side_effect = [pending_version, None]
delete_version('projects/mock_project/models/mock_model/versions/mock_version',
wait_interval = 0)
self.assertEqual(2, mock_mlengine_client().get_version.call_count) | 8,300 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/ml_engine/__init__.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | 8,301 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/ml_engine/test__create_version.py | # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from googleapiclient import errors
from kfp_component.google.ml_engine import create_version
CREATE_VERSION_MODULE = 'kfp_component.google.ml_engine._create_version'
@mock.patch(CREATE_VERSION_MODULE + '.display.display')
@mock.patch(CREATE_VERSION_MODULE + '.gcp_common.dump_file')
@mock.patch(CREATE_VERSION_MODULE + '.KfpExecutionContext')
@mock.patch(CREATE_VERSION_MODULE + '.MLEngineClient')
class TestCreateVersion(unittest.TestCase):
def test_create_version_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
version = {
'description': 'the mock version'
}
mock_mlengine_client().get_version.return_value = None
mock_mlengine_client().create_version.return_value = {
'name': 'mock_operation_name'
}
mock_mlengine_client().get_operation.return_value = {
'done': True,
'response': version
}
result = create_version('projects/mock_project/models/mock_model',
deployemnt_uri = 'gs://test-location', version_id = 'mock_version',
version = version,
replace_existing = True)
self.assertEqual(version, result)
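# Failure path: the long-running operation finishes with an 'error' payload,
# which the test expects create_version to surface as a RuntimeError.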
def test_create_version_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
version = {
'name': 'mock_version',
'description': 'the mock version',
'deploymentUri': 'gs://test-location'
}
mock_mlengine_client().get_version.return_value = None
mock_mlengine_client().create_version.return_value = {
'name': 'mock_operation_name'
}
mock_mlengine_client().get_operation.return_value = {
'done': True,
'error': {
'code': 400,
'message': 'bad request'
}
}
with self.assertRaises(RuntimeError) as context:
create_version('projects/mock_project/models/mock_model',
version = version, replace_existing = True, wait_interval = 30)
self.assertEqual(
'Failed to complete create version operation mock_operation_name: 400 bad request',
str(context.exception))
def test_create_version_dup_version_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
version = {
'name': 'mock_version',
'description': 'the mock version',
'deploymentUri': 'gs://test-location'
}
pending_version = {
'state': 'CREATING'
}
pending_version.update(version)
ready_version = {
'state': 'READY'
}
ready_version.update(version)
mock_mlengine_client().get_version.side_effect = [
pending_version, ready_version]
result = create_version('projects/mock_project/models/mock_model', version = version,
replace_existing = True, wait_interval = 0)
self.assertEqual(ready_version, result)
def test_create_version_failed_state(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
version = {
'name': 'mock_version',
'description': 'the mock version',
'deploymentUri': 'gs://test-location'
}
pending_version = {
'state': 'CREATING'
}
pending_version.update(version)
failed_version = {
'state': 'FAILED',
'errorMessage': 'something bad happens'
}
failed_version.update(version)
mock_mlengine_client().get_version.side_effect = [
pending_version, failed_version]
with self.assertRaises(RuntimeError) as context:
create_version('projects/mock_project/models/mock_model', version = version,
replace_existing = True, wait_interval = 0)
self.assertEqual(
'Version is in failed state: something bad happens',
str(context.exception))
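# Conflict handling with replace_existing=True: the existing READY version with
# different settings should be deleted first, then the new version created; the
# two get_operation results below correspond to the delete and create operations.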
def test_create_version_conflict_version_replace_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
version = {
'name': 'mock_version',
'description': 'the mock version',
'deploymentUri': 'gs://test-location'
}
conflicting_version = {
'name': 'mock_version',
'description': 'the changed mock version',
'deploymentUri': 'gs://changed-test-location',
'state': 'READY'
}
mock_mlengine_client().get_version.return_value = conflicting_version
mock_mlengine_client().delete_version.return_value = {
'name': 'delete_operation_name'
}
mock_mlengine_client().create_version.return_value = {
'name': 'create_operation_name'
}
delete_operation = { 'response': {}, 'done': True }
create_operation = { 'response': version, 'done': True }
mock_mlengine_client().get_operation.side_effect = [
delete_operation,
create_operation
]
result = create_version('projects/mock_project/models/mock_model', version = version,
replace_existing = True, wait_interval = 0)
self.assertEqual(version, result)
def test_create_version_conflict_version_delete_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
version = {
'name': 'mock_version',
'description': 'the mock version',
'deploymentUri': 'gs://test-location'
}
conflicting_version = {
'name': 'mock_version',
'description': 'the changed mock version',
'deploymentUri': 'gs://changed-test-location',
'state': 'READY'
}
mock_mlengine_client().get_version.return_value = conflicting_version
mock_mlengine_client().delete_version.return_value = {
'name': 'delete_operation_name'
}
delete_operation = {
'done': True,
'error': {
'code': 400,
'message': 'bad request'
}
}
mock_mlengine_client().get_operation.return_value = delete_operation
with self.assertRaises(RuntimeError) as context:
create_version('projects/mock_project/models/mock_model', version = version,
replace_existing = True, wait_interval = 0)
self.assertEqual(
'Failed to complete delete version operation delete_operation_name: 400 bad request',
str(context.exception))
def test_create_version_conflict_version_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
version = {
'name': 'mock_version',
'description': 'the mock version',
'deploymentUri': 'gs://test-location'
}
conflicting_version = {
'name': 'mock_version',
'description': 'the changed mock version',
'deploymentUri': 'gs://changed-test-location',
'state': 'READY'
}
mock_mlengine_client().get_version.return_value = conflicting_version
with self.assertRaises(RuntimeError) as context:
create_version('projects/mock_project/models/mock_model', version = version,
replace_existing = False, wait_interval = 0)
self.assertEqual(
'Existing version conflicts with the name of the new version.',
str(context.exception)) | 8,302 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/ml_engine/test__create_job.py |
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from googleapiclient import errors
from kfp_component.google.ml_engine import create_job
CREATE_JOB_MODULE = 'kfp_component.google.ml_engine._create_job'
COMMON_OPS_MODEL = 'kfp_component.google.ml_engine._common_ops'
@mock.patch(COMMON_OPS_MODEL + '.display.display')
@mock.patch(COMMON_OPS_MODEL + '.gcp_common.dump_file')
@mock.patch(CREATE_JOB_MODULE + '.KfpExecutionContext')
@mock.patch(CREATE_JOB_MODULE + '.MLEngineClient')
class TestCreateJob(unittest.TestCase):
def test_create_job_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().get_job.return_value = (
returned_job)
result = create_job('mock_project', job)
self.assertEqual(returned_job, result)
mock_mlengine_client().create_job.assert_called_with(
project_id = 'mock_project',
job = {
'jobId': 'job_ctx1'
}
)
def test_create_job_with_job_id_prefix_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'mock_job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().get_job.return_value = (
returned_job)
result = create_job('mock_project', job, job_id_prefix='mock_job_')
self.assertEqual(returned_job, result)
mock_mlengine_client().create_job.assert_called_with(
project_id = 'mock_project',
job = {
'jobId': 'mock_job_ctx1'
}
)
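# Idempotency path: create_job raises HTTP 409 because the job already exists,
# so the component is expected to fall back to get_job and return the existing job.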
def test_execute_retry_job_success(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().create_job.side_effect = errors.HttpError(
resp = mock.Mock(status=409),
content = b'conflict'
)
mock_mlengine_client().get_job.return_value = returned_job
result = create_job('mock_project', job)
self.assertEqual(returned_job, result)
def test_create_job_use_context_id_as_name(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
context_id = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().get_job.return_value = (
returned_job)
mock_kfp_context().__enter__().context_id.return_value = context_id
create_job('mock_project', job)
mock_mlengine_client().create_job.assert_called_with(
project_id = 'mock_project',
job = {
'jobId': 'job_ctx1'
}
)
def test_execute_conflict_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'trainingInput': {
'modelDir': 'test'
},
'state': 'SUCCEEDED'
}
mock_mlengine_client().create_job.side_effect = errors.HttpError(
resp = mock.Mock(status=409),
content = b'conflict'
)
mock_mlengine_client().get_job.return_value = returned_job
with self.assertRaises(errors.HttpError) as context:
create_job('mock_project', job)
self.assertEqual(409, context.exception.resp.status)
def test_execute_create_job_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
mock_mlengine_client().create_job.side_effect = errors.HttpError(
resp = mock.Mock(status=400),
content = b'bad request'
)
with self.assertRaises(errors.HttpError) as context:
create_job('mock_project', job)
self.assertEqual(400, context.exception.resp.status)
def test_execute_job_status_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'mock_job',
'trainingInput': {
'modelDir': 'test'
},
'state': 'FAILED'
}
mock_mlengine_client().get_job.return_value = returned_job
with self.assertRaises(RuntimeError):
create_job('mock_project', job)
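# Cancellation path: the on_cancel callback registered with KfpExecutionContext
# should cancel the created job by project id and job id.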
def test_cancel_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'jobId': 'job_ctx1',
'state': 'SUCCEEDED'
}
mock_mlengine_client().get_job.return_value = (
returned_job)
create_job('mock_project', job)
cancel_func = mock_kfp_context.call_args[1]['on_cancel']
cancel_func()
mock_mlengine_client().cancel_job.assert_called_with(
'mock_project', 'job_ctx1'
)
| 8,303 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/ml_engine/test__deploy.py | # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from googleapiclient import errors
from kfp_component.google.ml_engine import deploy
MODULE = 'kfp_component.google.ml_engine._deploy'
@mock.patch(MODULE + '.storage.Client')
@mock.patch(MODULE + '.create_model')
@mock.patch(MODULE + '.create_version')
@mock.patch(MODULE + '.set_default_version')
class TestDeploy(unittest.TestCase):
def test_deploy_default_path(self, mock_set_default_version, mock_create_version,
mock_create_model, mock_storage_client):
mock_storage_client().bucket().list_blobs().prefixes = []
mock_storage_client().bucket().list_blobs().__iter__.return_value = []
mock_create_model.return_value = {
'name': 'projects/mock-project/models/mock-model'
}
expected_version = {
'name': 'projects/mock-project/models/mock-model/version/mock-version'
}
mock_create_version.return_value = expected_version
result = deploy('gs://model/uri', 'mock-project')
self.assertEqual(expected_version, result)
mock_create_version.assert_called_with(
'projects/mock-project/models/mock-model',
'gs://model/uri',
None, # version_name
None, # runtime_version
None, # python_version
None, # version
False, # replace_existing_version
30)
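# TF exporter layout: the model URI contains numbered export subdirectories, and
# deploy is expected to pick the latest one ('uri/123/') as the deployment URI.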
def test_deploy_tf_exporter_path(self, mock_set_default_version, mock_create_version,
mock_create_model, mock_storage_client):
prefixes_mock = mock.PropertyMock()
prefixes_mock.return_value = set(['uri/012/', 'uri/123/'])
type(mock_storage_client().bucket().list_blobs()).prefixes = prefixes_mock
mock_storage_client().bucket().list_blobs().__iter__.return_value = []
mock_storage_client().bucket().name = 'model'
mock_create_model.return_value = {
'name': 'projects/mock-project/models/mock-model'
}
expected_version = {
'name': 'projects/mock-project/models/mock-model/version/mock-version'
}
mock_create_version.return_value = expected_version
result = deploy('gs://model/uri', 'mock-project')
self.assertEqual(expected_version, result)
mock_create_version.assert_called_with(
'projects/mock-project/models/mock-model',
'gs://model/uri/123/',
None, # version_name
None, # runtime_version
None, # python_version
None, # version
False, # replace_existing_version
30)
def test_deploy_set_default_version(self, mock_set_default_version, mock_create_version,
mock_create_model, mock_storage_client):
mock_storage_client().bucket().list_blobs().prefixes = []
mock_storage_client().bucket().list_blobs().__iter__.return_value = []
mock_create_model.return_value = {
'name': 'projects/mock-project/models/mock-model'
}
expected_version = {
'name': 'projects/mock-project/models/mock-model/version/mock-version'
}
mock_create_version.return_value = expected_version
mock_set_default_version.return_value = expected_version
result = deploy('gs://model/uri', 'mock-project', set_default=True)
self.assertEqual(expected_version, result)
mock_set_default_version.assert_called_with(
'projects/mock-project/models/mock-model/version/mock-version')
| 8,304 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/ml_engine/test__create_model.py | # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from googleapiclient import errors
from kfp_component.google.ml_engine import create_model
CREATE_MODEL_MODULE = 'kfp_component.google.ml_engine._create_model'
@mock.patch(CREATE_MODEL_MODULE + '.display.display')
@mock.patch(CREATE_MODEL_MODULE + '.gcp_common.dump_file')
@mock.patch(CREATE_MODEL_MODULE + '.KfpExecutionContext')
@mock.patch(CREATE_MODEL_MODULE + '.MLEngineClient')
class TestCreateModel(unittest.TestCase):
def test_create_model_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
model = {
'name': 'mock_model',
'description': 'the mock model'
}
mock_mlengine_client().create_model.return_value = model
result = create_model('mock_project', 'mock_model', model)
self.assertEqual(model, result)
def test_create_model_conflict_succeed(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
model = {
'name': 'mock_model',
'description': 'the mock model'
}
mock_mlengine_client().create_model.side_effect = errors.HttpError(
resp = mock.Mock(status=409),
content = b'conflict'
)
mock_mlengine_client().get_model.return_value = model
result = create_model('mock_project', 'mock_model', model)
self.assertEqual(model, result)
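# If the existing model differs from the requested one, the 409 from create_model
# should be re-raised rather than treated as an idempotent success.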
def test_create_model_conflict_fail(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
model = {
'name': 'mock_model',
'description': 'the mock model'
}
mock_mlengine_client().create_model.side_effect = errors.HttpError(
resp = mock.Mock(status=409),
content = b'conflict'
)
changed_model = {
'name': 'mock_model',
'description': 'the changed mock model'
}
mock_mlengine_client().get_model.return_value = changed_model
with self.assertRaises(errors.HttpError) as context:
create_model('mock_project', 'mock_model', model)
self.assertEqual(409, context.exception.resp.status)
def test_create_model_use_context_id_as_name(self, mock_mlengine_client,
mock_kfp_context, mock_dump_json, mock_display):
context_id = 'context1'
model = {}
returned_model = {
'name': 'model_' + context_id
}
mock_mlengine_client().create_model.return_value = returned_model
mock_kfp_context().__enter__().context_id.return_value = context_id
create_model('mock_project', model=model)
mock_mlengine_client().create_model.assert_called_with(
project_id = 'mock_project',
model = returned_model
) | 8,305 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/bigquery/test__query.py | # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from google.cloud import bigquery
from google.api_core import exceptions
from kfp_component.google.bigquery import query
CREATE_JOB_MODULE = 'kfp_component.google.bigquery._query'
@mock.patch(CREATE_JOB_MODULE + '.display.display')
@mock.patch(CREATE_JOB_MODULE + '.gcp_common.dump_file')
@mock.patch(CREATE_JOB_MODULE + '.KfpExecutionContext')
@mock.patch(CREATE_JOB_MODULE + '.bigquery.Client')
class TestQuery(unittest.TestCase):
def test_query_succeed(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
mock_client().get_dataset.side_effect = exceptions.NotFound('not found')
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1',
output_gcs_path='gs://output/path')
self.assertEqual(mock_response, result)
mock_client().create_dataset.assert_called()
expected_job_config = bigquery.QueryJobConfig()
expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED
expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE
expected_job_config.destination = mock_dataset.table('query_ctx1')
mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY,
job_id = 'query_ctx1')
actual_job_config = mock_client().query.call_args_list[0][0][1]
self.assertDictEqual(
expected_job_config.to_api_repr(),
actual_job_config.to_api_repr()
)
mock_client().extract_table.assert_called_with(
mock_dataset.table('query_ctx1'),
'gs://output/path')
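# Without output_gcs_path and with an existing dataset, the test expects no
# dataset creation and no extract job; results stay in the destination table.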
def test_query_no_output_path(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
mock_client().get_dataset.return_value = bigquery.Dataset(mock_dataset)
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1', 'table-1')
self.assertEqual(mock_response, result)
mock_client().create_dataset.assert_not_called()
mock_client().extract_table.assert_not_called()
expected_job_config = bigquery.QueryJobConfig()
expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED
expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE
expected_job_config.destination = mock_dataset.table('table-1')
mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY,
job_id = 'query_ctx1')
actual_job_config = mock_client().query.call_args_list[0][0][1]
self.assertDictEqual(
expected_job_config.to_api_repr(),
actual_job_config.to_api_repr()
)
| 8,306 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataflow/__init__.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | 8,307 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataflow/test__launch_template.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import os
from kfp_component.google.dataflow import launch_template
MODULE = 'kfp_component.google.dataflow._launch_template'
@mock.patch(MODULE + '.storage')
@mock.patch('kfp_component.google.dataflow._common_ops.display')
@mock.patch(MODULE + '.KfpExecutionContext')
@mock.patch(MODULE + '.DataflowClient')
class LaunchTemplateTest(unittest.TestCase):
def test_launch_template_succeed(self, mock_client, mock_context, mock_display,
mock_storage):
mock_context().__enter__().context_id.return_value = 'context-1'
mock_storage.Client().bucket().blob().exists.return_value = False
mock_client().launch_template.return_value = {
'job': { 'id': 'job-1' }
}
expected_job = {
'id': 'job-1',
'currentState': 'JOB_STATE_DONE'
}
mock_client().get_job.return_value = expected_job
result = launch_template('project-1', 'gs://foo/bar', {
"parameters": {
"foo": "bar"
},
"environment": {
"zone": "us-central1"
}
}, staging_dir='gs://staging/dir')
self.assertEqual(expected_job, result)
mock_client().launch_template.assert_called_once()
mock_storage.Client().bucket().blob().upload_from_string.assert_called_with(
'job-1,'
)
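# Retry path: a staged job-id file already exists, so the component should reuse
# job 'job-1' and must not call launch_template again.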
def test_launch_template_retry_succeed(self,
mock_client, mock_context, mock_display, mock_storage):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_storage.Client().bucket().blob().exists.return_value = True
mock_storage.Client().bucket().blob().download_as_string.return_value = b'job-1,'
pending_job = {
'currentState': 'JOB_STATE_PENDING'
}
expected_job = {
'id': 'job-1',
'currentState': 'JOB_STATE_DONE'
}
mock_client().get_job.side_effect = [pending_job, expected_job]
result = launch_template('project-1', 'gs://foo/bar', {
"parameters": {
"foo": "bar"
},
"environment": {
"zone": "us-central1"
}
}, staging_dir='gs://staging/dir', wait_interval=0)
self.assertEqual(expected_job, result)
mock_client().launch_template.assert_not_called()
def test_launch_template_fail(self, mock_client, mock_context, mock_display,
mock_storage):
mock_context().__enter__().context_id.return_value = 'context-1'
mock_storage.Client().bucket().blob().exists.return_value = False
mock_client().launch_template.return_value = {
'job': { 'id': 'job-1' }
}
failed_job = {
'id': 'job-1',
'currentState': 'JOB_STATE_FAILED'
}
mock_client().get_job.return_value = failed_job
self.assertRaises(RuntimeError,
lambda: launch_template('project-1', 'gs://foo/bar', {
"parameters": {
"foo": "bar"
},
"environment": {
"zone": "us-central1"
}
}))
| 8,308 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataflow/test__launch_python.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import os
from kfp_component.google.dataflow import launch_python
MODULE = 'kfp_component.google.dataflow._launch_python'
@mock.patch(MODULE + '.storage')
@mock.patch('kfp_component.google.dataflow._common_ops.display')
@mock.patch(MODULE + '.stage_file')
@mock.patch(MODULE + '.KfpExecutionContext')
@mock.patch(MODULE + '.DataflowClient')
@mock.patch(MODULE + '.Process')
@mock.patch(MODULE + '.subprocess')
class LaunchPythonTest(unittest.TestCase):
def test_launch_python_succeed(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display, mock_storage):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_storage.Client().bucket().blob().exists.return_value = False
mock_process().read_lines.return_value = [
b'https://console.cloud.google.com/dataflow/locations/us-central1/jobs/job-1?project=project-1'
]
expected_job = {
'id': 'job-1',
'currentState': 'JOB_STATE_DONE'
}
mock_client().get_job.return_value = expected_job
result = launch_python('/tmp/test.py', 'project-1', staging_dir='gs://staging/dir')
self.assertEqual(expected_job, result)
mock_storage.Client().bucket().blob().upload_from_string.assert_called_with(
'job-1,us-central1'
)
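# Retry path: the staged 'job-1,us-central1' record is found, so the Beam process
# must not be started again and the existing job is polled instead.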
def test_launch_python_retry_succeed(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display, mock_storage):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_storage.Client().bucket().blob().exists.return_value = True
mock_storage.Client().bucket().blob().download_as_string.return_value = b'job-1,us-central1'
expected_job = {
'id': 'job-1',
'currentState': 'JOB_STATE_DONE'
}
mock_client().get_job.return_value = expected_job
result = launch_python('/tmp/test.py', 'project-1', staging_dir='gs://staging/dir')
self.assertEqual(expected_job, result)
mock_process.assert_not_called()
def test_launch_python_no_job_created(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display, mock_storage):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_process().read_lines.return_value = [
b'no job id',
b'no job id'
]
result = launch_python('/tmp/test.py', 'project-1')
self.assertEqual(None, result)
| 8,309 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_spark_job.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_spark_job
MODULE = 'kfp_component.google.dataproc._submit_spark_job'
@mock.patch(MODULE + '.submit_job')
class TestSubmitSparkJob(unittest.TestCase):
def test_submit_spark_job_with_expected_payload(self, mock_submit_job):
submit_spark_job('mock-project', 'mock-region', 'mock-cluster',
main_jar_file_uri='gs://mock/jar/file.jar',
args=['arg1', 'arg2'],
spark_job={ 'jarFileUris': ['gs://other/jar/file.jar'] },
job={ 'labels': {'key1': 'value1'}})
mock_submit_job.assert_called_with('mock-project', 'mock-region', 'mock-cluster',
{
'sparkJob': {
'mainJarFileUri': 'gs://mock/jar/file.jar',
'args': ['arg1', 'arg2'],
'jarFileUris': ['gs://other/jar/file.jar']
},
'labels': {
'key1': 'value1'
}
}, 30) | 8,310 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_hadoop_job.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_hadoop_job
MODULE = 'kfp_component.google.dataproc._submit_hadoop_job'
@mock.patch(MODULE + '.submit_job')
class TestSubmitHadoopJob(unittest.TestCase):
def test_submit_hadoop_job_with_expected_payload(self, mock_submit_job):
submit_hadoop_job('mock-project', 'mock-region', 'mock-cluster',
main_jar_file_uri='gs://mock/jar/file.jar',
args=['arg1', 'arg2'],
hadoop_job={ 'jarFileUris': ['gs://other/jar/file.jar'] },
job={ 'labels': {'key1': 'value1'}})
mock_submit_job.assert_called_with('mock-project', 'mock-region', 'mock-cluster',
{
'hadoopJob': {
'mainJarFileUri': 'gs://mock/jar/file.jar',
'args': ['arg1', 'arg2'],
'jarFileUris': ['gs://other/jar/file.jar']
},
'labels': {
'key1': 'value1'
}
}, 30) | 8,311 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__delete_cluster.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from googleapiclient import errors
from kfp_component.google.dataproc import delete_cluster
MODULE = 'kfp_component.google.dataproc._delete_cluster'
@mock.patch(MODULE + '.KfpExecutionContext')
@mock.patch(MODULE + '.DataprocClient')
class TestDeleteCluster(unittest.TestCase):
def test_delete_cluster_succeed(self, mock_client, mock_context):
mock_context().__enter__().context_id.return_value = 'ctx1'
delete_cluster('mock-project', 'mock-region', 'mock-cluster')
mock_client().delete_cluster.assert_called_with('mock-project',
'mock-region', 'mock-cluster', request_id='ctx1')
def test_delete_cluster_ignore_not_found(self, mock_client, mock_context):
mock_context().__enter__().context_id.return_value = 'ctx1'
mock_client().delete_cluster.side_effect = errors.HttpError(
resp = mock.Mock(status=404),
content = b'not found'
)
delete_cluster('mock-project', 'mock-region', 'mock-cluster')
| 8,312 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/__init__.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | 8,313 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_pyspark_job.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_pyspark_job
MODULE = 'kfp_component.google.dataproc._submit_pyspark_job'
@mock.patch(MODULE + '.submit_job')
class TestSubmitPySparkJob(unittest.TestCase):
def test_submit_pyspark_job_with_expected_payload(self, mock_submit_job):
submit_pyspark_job('mock-project', 'mock-region', 'mock-cluster',
main_python_file_uri='gs://mock/python/file.py', args=['arg1', 'arg2'],
pyspark_job={ 'pythonFileUris': ['gs://other/python/file.py'] },
job={ 'labels': {'key1': 'value1'}})
mock_submit_job.assert_called_with('mock-project', 'mock-region', 'mock-cluster',
{
'pysparkJob': {
'mainPythonFileUri': 'gs://mock/python/file.py',
'args': ['arg1', 'arg2'],
'pythonFileUris': ['gs://other/python/file.py']
},
'labels': {
'key1': 'value1'
}
}, 30) | 8,314 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_sparksql_job.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_sparksql_job
MODULE = 'kfp_component.google.dataproc._submit_sparksql_job'
@mock.patch(MODULE + '.submit_job')
class TestSubmitSparkSqlJob(unittest.TestCase):
def test_submit_sparksql_job_with_expected_payload(self, mock_submit_job):
submit_sparksql_job('mock-project', 'mock-region', 'mock-cluster',
queries=['select * from mock_table'],
script_variables={'var-1': 'value1'},
sparksql_job={ 'jarFileUris': ['gs://jar/file.jar'] },
job={ 'labels': {'key1': 'value1'}})
mock_submit_job.assert_called_with('mock-project', 'mock-region', 'mock-cluster',
{
'sparkSqlJob': {
'queryList': { 'queries': [
'select * from mock_table'
]},
'scriptVariables': {'var-1': 'value1'},
'jarFileUris': ['gs://jar/file.jar']
},
'labels': {
'key1': 'value1'
}
}, 30) | 8,315 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_pig_job.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_pig_job
MODULE = 'kfp_component.google.dataproc._submit_pig_job'
@mock.patch(MODULE + '.submit_job')
class TestSubmitPigJob(unittest.TestCase):
def test_submit_pig_job_with_expected_payload(self, mock_submit_job):
submit_pig_job('mock-project', 'mock-region', 'mock-cluster',
queries=['select * from mock_table'],
script_variables={'var-1': 'value1'},
pig_job={ 'continueOnFailure': True },
job={ 'labels': {'key1': 'value1'}})
mock_submit_job.assert_called_with('mock-project', 'mock-region', 'mock-cluster',
{
'pigJob': {
'queryList': { 'queries': [
'select * from mock_table'
]},
'scriptVariables': {'var-1': 'value1'},
'continueOnFailure': True
},
'labels': {
'key1': 'value1'
}
}, 30) | 8,316 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_hive_job.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_hive_job
MODULE = 'kfp_component.google.dataproc._submit_hive_job'
@mock.patch(MODULE + '.submit_job')
class TestSubmitHiveJob(unittest.TestCase):
def test_submit_hive_job_with_expected_payload(self, mock_submit_job):
submit_hive_job('mock-project', 'mock-region', 'mock-cluster',
queries=['select * from mock_table'],
script_variables={'var-1': 'value1'},
hive_job={ 'continueOnFailure': True },
job={ 'labels': {'key1': 'value1'}})
mock_submit_job.assert_called_with('mock-project', 'mock-region', 'mock-cluster',
{
'hiveJob': {
'queryList': { 'queries': [
'select * from mock_table'
]},
'scriptVariables': {'var-1': 'value1'},
'continueOnFailure': True
},
'labels': {
'key1': 'value1'
}
}, 30) | 8,317 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__submit_job.py | # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import submit_job
MODULE = 'kfp_component.google.dataproc._submit_job'
@mock.patch(MODULE + '.display.display')
@mock.patch(MODULE + '.gcp_common.dump_file')
@mock.patch(MODULE + '.KfpExecutionContext')
@mock.patch(MODULE + '.DataprocClient')
class TestSubmitJob(unittest.TestCase):
def test_submit_job_succeed(self, mock_dataproc_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
expected_job = {
'reference': {
'projectId': 'mock-project'
},
'placement': {
'clusterName': 'mock-cluster'
}
}
returned_job = {
'reference': {
'projectId': 'mock-project',
'jobId': 'mock-job'
},
'placement': {
'clusterName': 'mock-cluster'
},
'status': {
'state': 'DONE'
}
}
mock_dataproc_client().submit_job.return_value = returned_job
mock_dataproc_client().get_job.return_value = returned_job
result = submit_job('mock-project', 'mock-region', 'mock-cluster', job)
mock_dataproc_client().submit_job.assert_called_with('mock-project', 'mock-region',
expected_job, request_id='ctx1')
self.assertEqual(returned_job, result)
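# An ERROR terminal state returned by get_job is expected to be raised as a RuntimeError.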
def test_submit_job_failed_with_error(self, mock_dataproc_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'reference': {
'projectId': 'mock-project',
'jobId': 'mock-job'
},
'placement': {
'clusterName': 'mock-cluster'
},
'status': {
'state': 'ERROR',
'details': 'mock error'
}
}
mock_dataproc_client().submit_job.return_value = returned_job
mock_dataproc_client().get_job.return_value = returned_job
with self.assertRaises(RuntimeError):
submit_job('mock-project', 'mock-region', 'mock-cluster', job)
def test_cancel_succeed(self, mock_dataproc_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
job = {}
returned_job = {
'reference': {
'projectId': 'mock-project',
'jobId': 'mock-job'
},
'placement': {
'clusterName': 'mock-cluster'
},
'status': {
'state': 'DONE'
}
}
mock_dataproc_client().submit_job.return_value = returned_job
mock_dataproc_client().get_job.return_value = returned_job
submit_job('mock-project', 'mock-region', 'mock-cluster', job)
cancel_func = mock_kfp_context.call_args[1]['on_cancel']
cancel_func()
mock_dataproc_client().cancel_job.assert_called_with(
'mock-project',
'mock-region',
'mock-job'
) | 8,318 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/google/dataproc/test__create_cluster.py |
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from kfp_component.google.dataproc import create_cluster
MODULE = 'kfp_component.google.dataproc._create_cluster'
@mock.patch(MODULE + '.display.display')
@mock.patch(MODULE + '.gcp_common.dump_file')
@mock.patch(MODULE + '.KfpExecutionContext')
@mock.patch(MODULE + '.DataprocClient')
class TestCreateCluster(unittest.TestCase):
def test_create_cluster_succeed(self, mock_dataproc_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
cluster = {
'projectId': 'mock_project',
'config': {},
'clusterName': 'cluster-ctx1'
}
mock_dataproc_client().wait_for_operation_done.return_value = (
{
'response': cluster
})
result = create_cluster('mock_project', 'mock-region')
self.assertEqual(cluster, result)
mock_dataproc_client().create_cluster.assert_called_with(
'mock_project',
'mock-region',
cluster,
request_id = 'ctx1')
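# Convenience arguments (name, initialization_actions, config_bucket, image_version)
# are expected to be merged into the full cluster spec passed to create_cluster.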
def test_create_cluster_with_specs_succeed(self, mock_dataproc_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
cluster = {
'projectId': 'mock_project',
'config': {
'initializationActions': [
{
'executableFile': 'gs://action/1'
},
{
'executableFile': 'gs://action/2'
}
],
'configBucket': 'gs://config/bucket',
'softwareConfig': {
'imageVersion': '1.10'
},
},
'labels': {
'label-1': 'value-1'
},
'clusterName': 'test-cluster'
}
mock_dataproc_client().wait_for_operation_done.return_value = (
{
'response': cluster
})
result = create_cluster('mock_project', 'mock-region',
name='test-cluster',
initialization_actions=['gs://action/1', 'gs://action/2'],
config_bucket='gs://config/bucket',
image_version='1.10',
cluster={
'labels':{
'label-1': 'value-1'
}
})
self.assertEqual(cluster, result)
mock_dataproc_client().create_cluster.assert_called_with(
'mock_project',
'mock-region',
cluster,
request_id = 'ctx1')
def test_create_cluster_name_prefix_succeed(self, mock_dataproc_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
cluster = {
'projectId': 'mock_project',
'config': {},
'clusterName': 'my-cluster-ctx1'
}
mock_dataproc_client().wait_for_operation_done.return_value = (
{
'response': cluster
})
result = create_cluster('mock_project', 'mock-region',
name_prefix='my-cluster')
self.assertEqual(cluster, result)
mock_dataproc_client().create_cluster.assert_called_with(
'mock_project',
'mock-region',
cluster,
request_id = 'ctx1')
def test_cancel_succeed(self, mock_dataproc_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
operation = {
'name': 'mock_operation'
}
mock_dataproc_client().create_cluster.return_value = (
operation)
cluster = {
'projectId': 'mock_project',
'config': {},
'clusterName': 'my-cluster-ctx1'
}
mock_dataproc_client().wait_for_operation_done.return_value = (
{
'response': cluster
})
create_cluster('mock_project', 'mock-region')
cancel_func = mock_kfp_context.call_args[1]['on_cancel']
cancel_func()
mock_dataproc_client().cancel_operation.assert_called_with(
'mock_operation'
)
| 8,319 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/core/test__kfp_execution_context.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from kfp_component.core import KfpExecutionContext
from kubernetes import client, config
from kubernetes.client.rest import ApiException
import mock
import unittest
@mock.patch('kubernetes.config.load_incluster_config')
@mock.patch('kubernetes.client.CoreV1Api')
class KfpExecutionContextTest(unittest.TestCase):
def test_init_succeed_without_pod_name(self,
mock_k8s_client, mock_load_config):
with KfpExecutionContext() as ctx:
self.assertFalse(ctx.under_kfp_environment())
pass
@mock.patch.dict('os.environ', {
'KFP_POD_NAME': 'mock-pod-id'
})
def test_init_succeed_when_load_k8s_config_fail(self,
mock_k8s_client, mock_load_config):
mock_load_config.side_effect = Exception()
with KfpExecutionContext() as ctx:
self.assertFalse(ctx.under_kfp_environment())
pass
@mock.patch.dict('os.environ', {
'KFP_POD_NAME': 'mock-pod-id'
})
def test_init_succeed_when_load_k8s_client_fail(self,
mock_k8s_client, mock_load_config):
mock_k8s_client.side_effect = Exception()
with KfpExecutionContext() as ctx:
self.assertFalse(ctx.under_kfp_environment())
pass
@mock.patch.dict('os.environ', {
'KFP_POD_NAME': 'mock-pod-id'
})
def test_init_succeed_when_load_pod_fail(self,
mock_k8s_client, mock_load_config):
mock_k8s_client().read_namespaced_pod.side_effect = Exception()
with KfpExecutionContext() as ctx:
self.assertFalse(ctx.under_kfp_environment())
pass
@mock.patch.dict('os.environ', {
'KFP_POD_NAME': 'mock-pod-id'
})
def test_init_succeed_no_argo_node_name(self,
mock_k8s_client, mock_load_config):
mock_pod = mock_k8s_client().read_namespaced_pod.return_value
mock_pod.metadata.annotations = {}
with KfpExecutionContext() as ctx:
self.assertFalse(ctx.under_kfp_environment())
pass
@mock.patch.dict('os.environ', {
'KFP_POD_NAME': 'mock-pod-id',
'KFP_NAMESPACE': 'mock-namespace'
})
def test_init_succeed(self,
mock_k8s_client, mock_load_config):
mock_pod = mock_k8s_client().read_namespaced_pod.return_value
mock_pod.metadata.annotations = {
'workflows.argoproj.io/node-name': 'node-1'
}
with KfpExecutionContext() as ctx:
self.assertTrue(ctx.under_kfp_environment())
pass
mock_k8s_client().read_namespaced_pod.assert_called_with('mock-pod-id', 'mock-namespace')
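# With an Argo 'execution' annotation carrying a deadline, _exit_gracefully is
# expected to fire the on_cancel handler; without it (next test) it must not.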
@mock.patch.dict('os.environ', {
'KFP_POD_NAME': 'mock-pod-id'
})
def test__exit_gracefully_cancel(self,
mock_k8s_client, mock_load_config):
mock_pod = mock_k8s_client().read_namespaced_pod.return_value
mock_pod.metadata.annotations = {
'workflows.argoproj.io/node-name': 'node-1',
'workflows.argoproj.io/execution': '{"deadline": "1970-01-01T00:00:00Z"}'
}
cancel_handler = mock.Mock()
context = KfpExecutionContext(on_cancel=cancel_handler)
context._exit_gracefully(0, 0)
cancel_handler.assert_called_once()
@mock.patch.dict('os.environ', {
'KFP_POD_NAME': 'mock-pod-id'
})
def test__exit_gracefully_no_cancel(self,
mock_k8s_client, mock_load_config):
mock_pod = mock_k8s_client().read_namespaced_pod.return_value
mock_pod.metadata.annotations = {
'workflows.argoproj.io/node-name': 'node-1'
}
cancel_handler = mock.Mock()
context = KfpExecutionContext(on_cancel=cancel_handler)
context._exit_gracefully(0, 0)
cancel_handler.assert_not_called()
@mock.patch.dict('os.environ', {
'KFP_POD_NAME': 'mock-pod-id'
})
def test_context_id_stable_across_retries(self,
mock_k8s_client, mock_load_config):
mock_pod = mock_k8s_client().read_namespaced_pod.return_value
mock_pod.metadata.annotations = {
'workflows.argoproj.io/node-name': 'node-1'
}
ctx1 = KfpExecutionContext()
ctx2 = KfpExecutionContext()
self.assertEqual(ctx1.context_id(), ctx2.context_id()) | 8,320 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/core/__init__.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | 8,321 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/tests/core/test__display.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp_component.core import display
import mock
import unittest
@mock.patch('kfp_component.core._display.json')
@mock.patch('kfp_component.core._display.os')
@mock.patch('kfp_component.core._display.open')
class DisplayTest(unittest.TestCase):
def test_display_markdown(self, mock_open, mock_os, mock_json):
mock_os.path.isfile.return_value = False
display.display(display.Markdown('# test'))
mock_json.dump.assert_called_with({
'outputs': [{
'type': 'markdown',
'source': '# test',
'storage': 'inline'
}]
}, mock.ANY)
def test_display_markdown_append(self, mock_open, mock_os, mock_json):
mock_os.path.isfile.return_value = True
mock_json.load.return_value = {
'outputs': [{
'type': 'markdown',
'source': '# test 1',
'storage': 'inline'
}]
}
display.display(display.Markdown('# test 2'))
mock_json.dump.assert_called_with({
'outputs': [{
'type': 'markdown',
'source': '# test 1',
'storage': 'inline'
},{
'type': 'markdown',
'source': '# test 2',
'storage': 'inline'
}]
}, mock.ANY)
def test_display_tensorboard(self, mock_open, mock_os, mock_json):
mock_os.path.isfile.return_value = False
display.display(display.Tensorboard('gs://job/dir'))
mock_json.dump.assert_called_with({
'outputs': [{
'type': 'tensorboard',
'source': 'gs://job/dir'
}]
}, mock.ANY)
def test_display_link(self, mock_open, mock_os, mock_json):
mock_os.path.isfile.return_value = False
display.display(display.Link('https://test/link', 'Test Link'))
mock_json.dump.assert_called_with({
'outputs': [{
'type': 'markdown',
'source': '## [Test Link](https://test/link)',
'storage': 'inline'
}]
}, mock.ANY)
def test___repr__(self, mock_open, mock_os, mock_json):
self.assertEqual('# Title', str(display.Markdown('# Title')))
self.assertEqual('Open Tensorboard at: gs://trained/model/',
str(display.Tensorboard('gs://trained/model/')))
self.assertEqual('title: https://test/uri',
str(display.Link('https://test/uri', 'title')))
| 8,322 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python | kubeflow_public_repos/kfp-tekton-backend/components/gcp/container/component_sdk/python/patches/http.patch | --- http.py 2019-05-03 15:07:52.591411824 -0700
+++ http_new.py 2019-05-03 15:09:23.470304022 -0700
@@ -1784,4 +1784,4 @@
http_timeout = socket.getdefaulttimeout()
else:
http_timeout = DEFAULT_HTTP_TIMEOUT_SEC
- return httplib2.Http(timeout=http_timeout)
+ return set_user_agent(httplib2.Http(timeout=http_timeout), '-kfpipeline-')
| 8,323 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow/launch_python/README.md |
# Name
Component: Data preparation by executing an Apache Beam job in Cloud Dataflow
# Labels
Cloud Dataflow, Apache Beam, Kubeflow
# Summary
A Kubeflow pipeline component that prepares data by submitting an Apache Beam job (authored in Python) to Cloud Dataflow for execution. The Python Beam code is run with the Cloud Dataflow Runner.
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Other
Technique:
Other
Input data type:
Tabular
ML workflow:
Data preparation
# Details
## Intended use
Use this component to run Python Beam code that submits a Cloud Dataflow job as a step of a Kubeflow pipeline.
## Runtime arguments
Name | Description | Optional | Data type| Accepted values | Default |
:--- | :----------| :----------| :----------| :----------| :---------- |
python_file_path | The path to the Cloud Storage bucket or local directory containing the Python file to be run. | - | GCSPath | - | - |
project_id | The ID of the Google Cloud Platform (GCP) project containing the Cloud Dataflow job.| -| GCPProjectID | -| -|
staging_dir | The path to the Cloud Storage directory where the staging files are stored. A random subdirectory is created under the staging directory to keep the job information. This is done so that you can resume the job in case of failure. The Beam code's `staging_location` and `temp_location` command line arguments are derived from `staging_dir`. | Yes | GCSPath | - | None |
requirements_file_path | The path to the Cloud Storage bucket or local directory containing the pip requirements file. | Yes | GCSPath | - | None |
args | The list of arguments to pass to the Python file. | No | List | A list of string arguments | None |
wait_interval | The number of seconds to wait between calls to get the status of the job. | Yes | Integer | - | 30 |
## Input data schema
Before you use the component, the following files must be ready in a Cloud Storage bucket:
- A Beam Python code file.
- A `requirements.txt` file which includes a list of dependent packages.
The Beam Python code should follow the [Beam programming guide](https://beam.apache.org/documentation/programming-guide/) as well as the following additional requirements to be compatible with this component:
- It accepts the command line arguments `--project`, `--temp_location`, `--staging_location`, which are [standard Dataflow Runner options](https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-pipeline-options).
- It enables INFO-level logging before the start of the Cloud Dataflow job, which allows the component to track the status and ID of the job that is created. For example, call `logging.getLogger().setLevel(logging.INFO)` before any other code runs. Both requirements are illustrated in the sketch below.
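The following minimal skeleton is a sketch of how a compatible Beam program might be organized; the transforms are placeholders, and the full word-count sample later in this document shows a complete program.
```python
# Minimal sketch of a component-compatible Beam program (placeholder transforms).
import argparse
import logging

import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions


def run(argv=None):
    parser = argparse.ArgumentParser()
    _, pipeline_args = parser.parse_known_args(argv)
    # --project, --staging_location and --temp_location arrive in pipeline_args
    # and are consumed by PipelineOptions; no extra handling is needed here.
    options = PipelineOptions(pipeline_args)
    with beam.Pipeline(options=options) as p:
        pass  # your transforms go here


if __name__ == '__main__':
    # Enable INFO logging before the job starts so the component can read
    # the Dataflow job ID from the console output.
    logging.getLogger().setLevel(logging.INFO)
    run()
```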
## Output
Name | Description
:--- | :----------
job_id | The ID of the Cloud Dataflow job that is created.
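In a pipeline definition, a downstream step can read this output through the task's `outputs` dictionary. The snippet below is a sketch; `downstream_op` is a hypothetical component shown only to illustrate the wiring.
```python
# Sketch: pass the Dataflow job ID to a hypothetical downstream component.
dataflow_task = dataflow_python_op(
    python_file_path=python_file_path,
    project_id=project_id,
    staging_dir=staging_dir,
    args=args)
downstream_op(job_id=dataflow_task.outputs['job_id'])  # hypothetical consumer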
## Cautions & requirements
To use the components, the following requirements must be met:
- Cloud Dataflow API is enabled.
- The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
- The Kubeflow user service account is a member of:
- `roles/dataflow.developer` role of the project.
- `roles/storage.objectViewer` role of the Cloud Storage Objects `python_file_path` and `requirements_file_path`.
- `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir`.
## Detailed description
The component does several things during the execution:
- Downloads `python_file_path` and `requirements_file_path` to local files.
- Starts a subprocess to launch the Python program.
- Monitors the logs produced from the subprocess to extract the Cloud Dataflow job information.
- Stores the Cloud Dataflow job information in `staging_dir` so the job can be resumed in case of failure.
- Waits for the job to finish.
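Conceptually, the log-monitoring step resembles the sketch below; the regular expression and bookkeeping are assumptions for illustration, not the component's verbatim implementation.
```python
# Rough illustration only -- not the component's actual code.
import re
import subprocess

JOB_ID_PATTERN = re.compile(r'Created job with id: \[(.+)\]')  # assumed log pattern

proc = subprocess.Popen(
    ['python', 'wc.py', '--runner', 'DataflowRunner', '--project', 'my-project'],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
job_id = None
for line in proc.stdout:
    match = JOB_ID_PATTERN.search(line)
    if match:
        job_id = match.group(1)
        break  # the real component also persists the job info under staging_dir
```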
The steps to use the component in a pipeline are:
1. Install the Kubeflow pipeline's SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow pipeline's SDK:
```python
import kfp.components as comp
dataflow_python_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataflow/launch_python/component.yaml')
help(dataflow_python_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code.
In this sample, we run a word-count program in a Kubeflow pipeline. The output is stored in a Cloud Storage bucket. Here is the sample code:
```python
!gsutil cat gs://ml-pipeline-playground/samples/dataflow/wc/wc.py
```
Concepts:
1. Reading data from text files.
2. Specifying inline transforms.
3. Counting a PCollection.
4. Writing data to Cloud Storage as text files.
Notes:
To execute this pipeline locally, first edit the code to specify the output location. Output location could be a local file path or an output prefix on Cloud Storage. (Only update the output location marked with the first CHANGE comment in the following code.)
To execute this pipeline remotely, first edit the code to set your project ID, runner type, the staging location, the temp location, and the output location.
The specified Cloud Storage bucket(s) must already exist. (Update all the places marked with a CHANGE comment in the following code.)
Then, run the pipeline as described in the README. It will be deployed and run using the Cloud Dataflow service. No arguments are required to run the pipeline. You can see the results in your output bucket in the Cloud Storage browser.
```python
from __future__ import absolute_import
import argparse
import logging
import re
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def run(argv=None):
"""Main entry point; defines and runs the wordcount pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
# CHANGE 1/5: The Cloud Storage path is required
# to output the results.
default='gs://YOUR_OUTPUT_BUCKET/AND_OUTPUT_PREFIX',
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# pipeline_args.extend([
# # CHANGE 2/5: (OPTIONAL) Change this to DataflowRunner to
# # run your pipeline on the Cloud Dataflow Service.
# '--runner=DirectRunner',
# # CHANGE 3/5: Your project ID is required in order to run your pipeline on
# # the Cloud Dataflow Service.
# '--project=SET_YOUR_PROJECT_ID_HERE',
# # CHANGE 4/5: Your Cloud Storage path is required for staging local
# # files.
# '--staging_location=gs://YOUR_BUCKET_NAME/AND_STAGING_DIRECTORY',
# # CHANGE 5/5: Your Cloud Storage path is required for temporary
# # files.
# '--temp_location=gs://YOUR_BUCKET_NAME/AND_TEMP_DIRECTORY',
# '--job_name=your-wordcount-job',
# ])
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Read the text file[pattern] into a PCollection.
lines = p | ReadFromText(known_args.input)
# Count the occurrences of each word.
counts = (
lines
| 'Split' >> (beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
.with_output_types(unicode))
| 'PairWithOne' >> beam.Map(lambda x: (x, 1))
| 'GroupAndSum' >> beam.CombinePerKey(sum))
# Format the counts into a PCollection of strings.
def format_result(word_count):
(word, count) = word_count
return '%s: %s' % (word, count)
output = counts | 'Format' >> beam.Map(format_result)
# Write the output using a "Write" transform that has side effects.
# pylint: disable=expression-not-assigned
output | WriteToText(known_args.output)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
```
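To try the program outside the pipeline, you can run it with the default DirectRunner and pass only the output location. This is a sketch for a notebook environment; the output path is a placeholder and reading the public sample bucket assumes the environment has working Google Cloud credentials.
```python
# Local test run with the default DirectRunner (placeholder output path).
!python wc.py --output /tmp/wordcount_out
```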
#### Set sample parameters
```python
# Required parameters
PROJECT_ID = '<Put your project ID here>'
GCS_STAGING_DIR = 'gs://<Put your GCS path here>' # No ending slash
```
```python
# Optional parameters
EXPERIMENT_NAME = 'Dataflow - Launch Python'
OUTPUT_FILE = '{}/wc/wordcount.out'.format(GCS_STAGING_DIR)
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataflow launch python pipeline',
description='Dataflow launch python pipeline'
)
def pipeline(
python_file_path = 'gs://ml-pipeline-playground/samples/dataflow/wc/wc.py',
project_id = PROJECT_ID,
staging_dir = GCS_STAGING_DIR,
requirements_file_path = 'gs://ml-pipeline-playground/samples/dataflow/wc/requirements.txt',
args = json.dumps([
'--output', OUTPUT_FILE
]),
wait_interval = 30
):
dataflow_python_op(
python_file_path = python_file_path,
project_id = project_id,
staging_dir = staging_dir,
requirements_file_path = requirements_file_path,
args = args,
wait_interval = wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
#### Inspect the output
```python
!gsutil cat $OUTPUT_FILE
```
## References
* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataflow/_launch_python.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataflow/launch_python/sample.ipynb)
* [Dataflow Python Quickstart](https://cloud.google.com/dataflow/docs/quickstarts/quickstart-python)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,324 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow/launch_python/sample.ipynb | import kfp.components as comp
dataflow_python_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataflow/launch_python/component.yaml')
help(dataflow_python_op)

# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
GCS_STAGING_DIR = 'gs://<Please put your GCS path here>' # No ending slash

# Optional Parameters
EXPERIMENT_NAME = 'Dataflow - Launch Python'
OUTPUT_FILE = '{}/wc/wordcount.out'.format(GCS_STAGING_DIR)

import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataflow launch python pipeline',
description='Dataflow launch python pipeline'
)
def pipeline(
python_file_path = 'gs://ml-pipeline-playground/samples/dataflow/wc/wc.py',
project_id = PROJECT_ID,
staging_dir = GCS_STAGING_DIR,
requirements_file_path = 'gs://ml-pipeline-playground/samples/dataflow/wc/requirements.txt',
args = json.dumps([
'--output', OUTPUT_FILE
]),
wait_interval = 30
):
dataflow_python_op(
python_file_path = python_file_path,
project_id = project_id,
staging_dir = staging_dir,
requirements_file_path = requirements_file_path,
args = args,
        wait_interval = wait_interval)

pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)

#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,325 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow/launch_python/component.yaml | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Launch Python
description: |
Launch a self-executing beam python file.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: python_file_path
description: 'The gcs or local path to the python file to run.'
type: String
- name: project_id
description: 'The ID of the parent project.'
type: GCPProjectID
- name: staging_dir
description: >-
Optional. The GCS directory for keeping staging files.
A random subdirectory will be created under the directory to keep job info
for resuming the job in case of failure and it will be passed as
`staging_location` and `temp_location` command line args of the beam code.
default: ''
type: GCSPath
- name: requirements_file_path
description: 'Optional, the gcs or local path to the pip requirements file'
default: ''
type: GCSPath
- name: args
description: 'The list of args to pass to the python file.'
default: '[]'
type: List
- name: wait_interval
default: '30'
description: 'Optional wait interval between calls to get job status. Defaults to 30.'
type: Integer
outputs:
- name: job_id
description: 'The id of the created dataflow job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataflow, launch_python,
--python_file_path, {inputValue: python_file_path},
--project_id, {inputValue: project_id},
--staging_dir, {inputValue: staging_dir},
--requirements_file_path, {inputValue: requirements_file_path},
--args, {inputValue: args},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataflow/job_id.txt
| 8,326 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow/launch_template/README.md |
# Name
Data preparation by using a template to submit a job to Cloud Dataflow
# Labels
GCP, Cloud Dataflow, Kubeflow, Pipeline
# Summary
A Kubeflow Pipeline component to prepare data by using a template to submit a job to Cloud Dataflow.
# Details
## Intended use
Use this component when you have a pre-built Cloud Dataflow template and want to launch it as a step in a Kubeflow Pipeline.
## Runtime arguments
Argument | Description | Optional | Data type | Accepted values | Default |
:--- | :---------- | :----------| :----------| :---------- | :----------|
project_id | The ID of the Google Cloud Platform (GCP) project to which the job belongs. | No | GCPProjectID | | |
gcs_path | The path to a Cloud Storage bucket containing the job creation template. It must be a valid Cloud Storage URL beginning with 'gs://'. | No | GCSPath | | |
launch_parameters | The parameters that are required to launch the template. The schema is defined in [LaunchTemplateParameters](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters). The parameter `jobName` is replaced by a generated name (see the example after this table). | Yes | Dict | A JSON object which has the same structure as [LaunchTemplateParameters](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters) | None |
location | The regional endpoint to which the job request is directed.| Yes | GCPRegion | | None |
staging_dir | The path to the Cloud Storage directory where the staging files are stored. A random subdirectory will be created under the staging directory to keep the job information. This is done so that you can resume the job in case of failure.| Yes | GCSPath | | None |
validate_only | If True, the request is validated but not executed. | Yes | Boolean | | False |
wait_interval | The number of seconds to wait between calls to get the status of the job. | Yes | Integer | | 30 |
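For example, the `launch_parameters` value can be built as a Python dictionary and serialized with `json.dumps`. The payload below is a sketch that follows the LaunchTemplateParameters schema; the bucket paths are placeholders and the `environment` block is optional.
```python
import json

# Illustrative LaunchTemplateParameters payload (placeholder values).
launch_parameters = json.dumps({
    'parameters': {
        'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
        'output': 'gs://YOUR_BUCKET/wordcount/out',
    },
    'environment': {  # optional RuntimeEnvironment settings
        'tempLocation': 'gs://YOUR_BUCKET/tmp',
        'maxWorkers': 2,
    },
})
```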
## Input data schema
The input `gcs_path` must contain a valid Cloud Dataflow template. The template can be created by following the instructions in [Creating Templates](https://cloud.google.com/dataflow/docs/guides/templates/creating-templates). You can also use [Google-provided templates](https://cloud.google.com/dataflow/docs/guides/templates/provided-templates).
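For reference, a classic template is typically staged by running the Beam pipeline once with a `--template_location` argument, roughly as sketched below for a notebook environment; the module name, project ID, and bucket paths are placeholders.
```python
# Sketch: stage a classic Dataflow template to Cloud Storage (placeholder names and paths).
!python my_beam_pipeline.py --runner DataflowRunner --project YOUR_PROJECT_ID --staging_location gs://YOUR_BUCKET/staging --temp_location gs://YOUR_BUCKET/temp --template_location gs://YOUR_BUCKET/templates/my_template
```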
## Output
Name | Description
:--- | :----------
job_id | The id of the Cloud Dataflow job that is created.
## Caution & requirements
To use the component, the following requirements must be met:
- Cloud Dataflow API is enabled.
- The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
- The Kubeflow user service account is a member of:
- `roles/dataflow.developer` role of the project.
- `roles/storage.objectViewer` role of the Cloud Storage Object `gcs_path.`
- `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir.`
## Detailed description
You can execute the template locally by following the instructions in [Executing Templates](https://cloud.google.com/dataflow/docs/guides/templates/executing-templates). See the sample code below to learn how to execute the template.
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipeline SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using KFP SDK
```python
import kfp.components as comp
dataflow_template_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataflow/launch_template/component.yaml')
help(dataflow_template_op)
```
### Sample
Note: The following sample code works in an IPython notebook or directly in Python code.
In this sample, we run a Google-provided word count template from `gs://dataflow-templates/latest/Word_Count`. The template takes a text file as input and outputs word counts to a Cloud Storage bucket. Here is the sample input:
```python
!gsutil cat gs://dataflow-samples/shakespeare/kinglear.txt
```
#### Set sample parameters
```python
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash
```
```python
# Optional Parameters
EXPERIMENT_NAME = 'Dataflow - Launch Template'
OUTPUT_PATH = '{}/out/wc'.format(GCS_WORKING_DIR)
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataflow launch template pipeline',
description='Dataflow launch template pipeline'
)
def pipeline(
project_id = PROJECT_ID,
gcs_path = 'gs://dataflow-templates/latest/Word_Count',
launch_parameters = json.dumps({
'parameters': {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': OUTPUT_PATH
}
}),
location = '',
validate_only = 'False',
staging_dir = GCS_WORKING_DIR,
wait_interval = 30):
dataflow_template_op(
project_id = project_id,
gcs_path = gcs_path,
launch_parameters = launch_parameters,
location = location,
validate_only = validate_only,
staging_dir = staging_dir,
        wait_interval = wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
#### Inspect the output
```python
!gsutil cat $OUTPUT_PATH*
```
## References
* [Component python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataflow/_launch_template.py)
* [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataflow/launch_template/sample.ipynb)
* [Cloud Dataflow Templates overview](https://cloud.google.com/dataflow/docs/guides/templates/overview)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,327 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow/launch_template/sample.ipynb | import kfp.components as comp
dataflow_template_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataflow/launch_template/component.yaml')
help(dataflow_template_op)

# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash

# Optional Parameters
EXPERIMENT_NAME = 'Dataflow - Launch Template'
OUTPUT_PATH = '{}/out/wc'.format(GCS_WORKING_DIR)

import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataflow launch template pipeline',
description='Dataflow launch template pipeline'
)
def pipeline(
project_id = PROJECT_ID,
gcs_path = 'gs://dataflow-templates/latest/Word_Count',
launch_parameters = json.dumps({
'parameters': {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': OUTPUT_PATH
}
}),
location = '',
validate_only = 'False',
staging_dir = GCS_WORKING_DIR,
wait_interval = 30):
dataflow_template_op(
project_id = project_id,
gcs_path = gcs_path,
launch_parameters = launch_parameters,
location = location,
validate_only = validate_only,
staging_dir = staging_dir,
        wait_interval = wait_interval)

pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)

#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,328 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataflow/launch_template/component.yaml | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Launch Dataflow Template
description: |
  Launches a Cloud Dataflow job from a template.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: 'Required. The ID of the Cloud Platform project that the job belongs to.'
type: GCPProjectID
- name: gcs_path
description: >-
Required. A Cloud Storage path to the template from
      which to create the job. Must be a valid Cloud Storage URL, beginning with `gs://`.
type: GCSPath
- name: launch_parameters
description: >-
Parameters to provide to the template being launched. Schema defined in
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters.
      `jobName` will be replaced by a generated name.
type: Dict
default: '{}'
- name: location
description: 'The regional endpoint to which to direct the request.'
default: ''
type: GCPRegion
- name: validate_only
description: >-
If true, the request is validated but not actually executed. Defaults to false.
default: 'False'
type: Bool
- name: staging_dir
description: >-
Optional. The GCS directory for keeping staging files.
A random subdirectory will be created under the directory to keep job info
for resuming the job in case of failure.
default: ''
type: GCSPath
- name: wait_interval
description: >-
Optional wait interval between calls to get job status. Defaults to 30.
default: '30'
type: Integer
outputs:
- name: job_id
description: 'The id of the created dataflow job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataflow, launch_template,
--project_id, {inputValue: project_id},
--gcs_path, {inputValue: gcs_path},
--launch_parameters, {inputValue: launch_parameters},
--location, {inputValue: location},
--validate_only, {inputValue: validate_only},
--staging_dir, {inputValue: staging_dir},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataflow/job_id.txt
| 8,329 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_hadoop_job/README.md |
# Name
Component: Data preparation using Hadoop MapReduce on YARN with Cloud Dataproc
# Labels
Cloud Dataproc, Hadoop, YARN, Apache, MapReduce
# Summary
A Kubeflow pipeline component to prepare data by submitting an Apache Hadoop MapReduce job on Apache Hadoop YARN to Cloud Dataproc.
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Technique:
Input data type:
ML workflow:
# Details
## Intended use
Use the component to run an Apache Hadoop MapReduce job as one preprocessing step in a Kubeflow pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|----------|-------------|----------|-----------|-----------------|---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | - | - |
| region | The Dataproc region to handle the request. | No | GCPRegion | - | - |
| cluster_name | The name of the cluster to run the job. | No | String | - | - |
| main_jar_file_uri | The Hadoop Compatible Filesystem (HCFS) URI of the JAR file containing the main class to execute. | No | GCSPath |- |- |
| main_class | The name of the driver's main class. The JAR file that contains the class must be either in the default CLASSPATH or specified in `hadoop_job.jarFileUris`. | No | String |- | - |
| args | The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission. | Yes | List | - | None |
| hadoop_job | The payload of a [HadoopJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob). | Yes | Dict | - | None |
| job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | -| None |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | - | 30 |
Note:
`main_jar_file_uri`: The examples for the files are:
- `gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar`
- `hdfs:/tmp/test-samples/custom-wordcount.jar`
- `file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar`
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role, `roles/dataproc.editor`, on the project.
## Detailed description
This component creates a Hadoop job from the [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
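The request body assembled from the component's arguments has roughly the shape sketched below. This is an illustration of the REST payload with placeholder values, not the component's exact output.
```python
# Illustrative shape of the submit-job request body (placeholder values).
request_body = {
    'job': {
        'placement': {'clusterName': 'my-cluster'},
        'hadoopJob': {
            'mainClass': 'org.apache.hadoop.examples.WordCount',
            'args': ['gs://my-bucket/input.txt', 'gs://my-bucket/output/'],
            # 'mainJarFileUri': 'gs://my-bucket/my-job.jar'  # alternative to mainClass
        },
    },
}
```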
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow pipeline's SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow pipeline's SDK:
```python
import kfp.components as comp
dataproc_submit_hadoop_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_hadoop_job/component.yaml')
help(dataproc_submit_hadoop_job_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to use the component.
#### Setup a Dataproc cluster
[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
### Prepare a Hadoop job
Upload your Hadoop JAR file to a Cloud Storage bucket. In this sample, we use a JAR file that is preinstalled on the Dataproc cluster, so you don't have to provide the `main_jar_file_uri` argument.
To package a self-contained Hadoop MapReduce application from the [WordCount example source code](https://github.com/apache/hadoop/blob/trunk/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordCount.java), follow the [MapReduce Tutorial](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html).
#### Set sample parameters
```python
PROJECT_ID = '<Put your project ID here>'
CLUSTER_NAME = '<Put your existing cluster name here>'
OUTPUT_GCS_PATH = '<Put your output GCS path here>'
REGION = 'us-central1'
MAIN_CLASS = 'org.apache.hadoop.examples.WordCount'
INPUT_GCS_PATH = 'gs://ml-pipeline-playground/shakespeare1.txt'
EXPERIMENT_NAME = 'Dataproc - Submit Hadoop Job'
```
#### Inspect the input data
The input file is a simple text file:
```python
!gsutil cat $INPUT_GCS_PATH
```
#### Clean up the existing output files (optional)
This is needed because the sample code requires the output folder to be a clean folder. To continue to run the sample, make sure that the service account of the notebook server has access to `OUTPUT_GCS_PATH`.
Caution: This will remove all blob files under `OUTPUT_GCS_PATH`.
```python
!gsutil rm $OUTPUT_GCS_PATH/**
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Hadoop job pipeline',
description='Dataproc submit Hadoop job pipeline'
)
def dataproc_submit_hadoop_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
main_jar_file_uri = '',
main_class = MAIN_CLASS,
args = json.dumps([
        INPUT_GCS_PATH,
OUTPUT_GCS_PATH
]),
hadoop_job='',
job='{}',
wait_interval='30'
):
dataproc_submit_hadoop_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
main_jar_file_uri=main_jar_file_uri,
main_class=main_class,
args=args,
hadoop_job=hadoop_job,
job=job,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = dataproc_submit_hadoop_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
#### Inspect the output
The sample in the notebook will count the words in the input text and save them in sharded files. The command to inspect the output is:
```python
!gsutil cat $OUTPUT_GCS_PATH/*
```
## References
* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_submit_hadoop_job.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/submit_hadoop_job/sample.ipynb)
* [Dataproc HadoopJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob)
# License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,330 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_hadoop_job/sample.ipynb | import kfp.components as comp
dataproc_submit_hadoop_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_hadoop_job/component.yaml')
help(dataproc_submit_hadoop_job_op)

PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
OUTPUT_GCS_PATH = '<Please put your output GCS path here>'
REGION = 'us-central1'
MAIN_CLASS = 'org.apache.hadoop.examples.WordCount'
INPUT_GCS_PATH = 'gs://ml-pipeline-playground/shakespeare1.txt'
EXPERIMENT_NAME = 'Dataproc - Submit Hadoop Job'

import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Hadoop job pipeline',
description='Dataproc submit Hadoop job pipeline'
)
def dataproc_submit_hadoop_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
main_jar_file_uri = '',
main_class = MAIN_CLASS,
args = json.dumps([
        INPUT_GCS_PATH,
OUTPUT_GCS_PATH
]),
hadoop_job='',
job='{}',
wait_interval='30'
):
dataproc_submit_hadoop_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
main_jar_file_uri=main_jar_file_uri,
main_class=main_class,
args=args,
hadoop_job=hadoop_job,
job=job,
        wait_interval=wait_interval)

pipeline_func = dataproc_submit_hadoop_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)

#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,331 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_hadoop_job/component.yaml | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: dataproc_submit_hadoop_job
description: >-
Submits a Cloud Dataproc job for running Apache Hadoop MapReduce jobs on
Apache Hadoop YARN.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: >-
Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
type: GCPProjectID
- name: region
description: >-
Required. The Cloud Dataproc region in which to handle the request.
type: GCPRegion
- name: cluster_name
description: 'Required. The cluster to run the job.'
type: String
- name: main_jar_file_uri
default: ''
description: >-
The HCFS URI of the jar file containing the main class. Examples:
`gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar`
`hdfs:/tmp/test-samples/custom-wordcount.jar`
`file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar`
type: GCSPath
- name: main_class
default: ''
description: >-
The name of the driver's main class. The jar file
containing the class must be in the default CLASSPATH or specified
in `jarFileUris`.
type: String
- name: args
default: ''
description: >-
Optional. The arguments to pass to the driver. Do not include
arguments, such as -libjars or -Dfoo=bar, that can be set as job properties,
since a collision may occur that causes an incorrect job submission.
type: List
- name: hadoop_job
default: ''
description: >-
Optional. The full payload of a
[hadoop job](https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob).
type: Dict
- name: job
default: ''
description: >-
Optional. The full payload of a
[Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
type: Dict
- name: wait_interval
default: '30'
description: >-
Optional. The wait seconds between polling the operation.
Defaults to 30.
type: Integer
outputs:
- name: job_id
description: 'The ID of the created job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_hadoop_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
--cluster_name, {inputValue: cluster_name},
--main_jar_file_uri, {inputValue: main_jar_file_uri},
--main_class, {inputValue: main_class},
--args, {inputValue: args},
--hadoop_job, {inputValue: hadoop_job},
--job, {inputValue: job},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
| 8,332 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_pig_job/README.md |
# Name
Component: Data preparation using Apache Pig on YARN with Cloud Dataproc
# Labels
Cloud Dataproc, YARN, Apache Pig, Kubeflow
# Summary
A Kubeflow pipeline component to prepare data by submitting an Apache Pig job on YARN to Cloud Dataproc.
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Other
Technique:
Other
Input data type:
Tabular
ML workflow:
Data preparation
# Details
## Intended use
Use this component to run an Apache Pig job as one preprocessing step in a Kubeflow pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|:----------|:-------------|:----------|:-----------|:-----------------|:---------|
| project_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to. | No | GCPProjectID |- | -|
| region | The Cloud Dataproc region that handles the request. | No | GCPRegion | - |- |
| cluster_name | The name of the cluster that runs the job. | No | String | - | - |
| queries | The queries to execute in the Pig job. Specify multiple queries in one string by separating them with semicolons. You do not need to terminate queries with semicolons. | Yes | List | -| None |
| query_file_uri | The Cloud Storage bucket path pointing to a file that contains the Pig queries. | Yes | GCSPath | - | None |
| script_variables | Mapping of the query’s variable names to their values (equivalent to the Pig command: SET name="value";). | Yes | Dict | -| None |
| pig_job | The payload of a [PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob). | Yes | Dict | - | None |
| job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | | None |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | - | 30 |
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role, `roles/dataproc.editor`, on the project.
## Detailed description
This component creates a Pig job from the [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow pipeline's SDK
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow pipeline's SDK
```python
import kfp.components as comp
dataproc_submit_pig_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_pig_job/component.yaml')
help(dataproc_submit_pig_job_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to use the component.
#### Setup a Dataproc cluster
[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
#### Prepare a Pig query
You can put your Pig queries in the `queries` list, or you can use `query_file_uri`. In this sample, we use a hard-coded query in the `queries` list that loads a public natality dataset from Cloud Storage.
For more details on Apache Pig, see the [Pig documentation](http://pig.apache.org/docs/latest/).
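As an illustration, the arguments could be prepared as follows before they are passed to the component; the query text, script path, and variable name are placeholders.
```python
import json

# Inline queries ...
queries = json.dumps([
    "data = LOAD 'gs://my-bucket/input' USING PigStorage(':');",
    'DUMP data;',
])
# ... or a Pig script stored on Cloud Storage.
query_file_uri = 'gs://my-bucket/scripts/transform.pig'
# Optional variables, equivalent to `SET name="value";` in the script.
script_variables = json.dumps({'min_count': '10'})
```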
#### Set sample parameters
```python
PROJECT_ID = '<Put your project ID here>'
CLUSTER_NAME = '<Put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
natality_csv = load 'gs://public-datasets/natality/csv' using PigStorage(':');
top_natality_csv = LIMIT natality_csv 10;
dump top_natality_csv;'''
EXPERIMENT_NAME = 'Dataproc - Submit Pig Job'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Pig job pipeline',
description='Dataproc submit Pig job pipeline'
)
def dataproc_submit_pig_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
pig_job='',
job='',
wait_interval='30'
):
dataproc_submit_pig_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
pig_job=pig_job,
job=job,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = dataproc_submit_pig_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster)
* [Pig documentation](http://pig.apache.org/docs/latest/)
* [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs)
* [PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,333 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_pig_job/sample.ipynb | import kfp.components as comp
dataproc_submit_pig_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_pig_job/component.yaml')
help(dataproc_submit_pig_job_op)

PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
natality_csv = load 'gs://public-datasets/natality/csv' using PigStorage(':');
top_natality_csv = LIMIT natality_csv 10;
dump top_natality_csv;'''
EXPERIMENT_NAME = 'Dataproc - Submit Pig Job'

import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Pig job pipeline',
description='Dataproc submit Pig job pipeline'
)
def dataproc_submit_pig_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
pig_job='',
job='',
wait_interval='30'
):
dataproc_submit_pig_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
pig_job=pig_job,
job=job,
wait_interval=wait_interval)
pipeline_func = dataproc_submit_pig_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)

#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,334 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_pig_job/component.yaml | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: dataproc_submit_pig_job
description: >-
Submits a Cloud Dataproc job for running Apache Pig queries on YARN.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: >-
Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
type: GCPProjectID
- name: region
description: >-
Required. The Cloud Dataproc region in which to handle the request.
type: GCPRegion
- name: cluster_name
description: 'Required. The cluster to run the job.'
type: String
- name: queries
default: ''
description: >-
Required. The queries to execute. You do not need to
terminate a query with a semicolon. Multiple queries can be specified
in one string by separating each with a semicolon.
type: List
- name: query_file_uri
default: ''
description: >-
The HCFS URI of the script that contains Pig queries.
type: GCSPath
- name: script_variables
default: ''
description: >-
Optional. Mapping of query variable names to
values (equivalent to the Pig command: SET name="value";).
type: Dict
- name: pig_job
default: ''
description: >-
Optional. The full payload of a
[PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob).
type: Dict
- name: job
default: ''
description: >-
Optional. The full payload of a
[Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
type: Dict
- name: wait_interval
default: '30'
description: >-
Optional. The wait seconds between polling the operation.
Defaults to 30.
type: Integer
outputs:
- name: job_id
description: 'The ID of the created job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_pig_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
--cluster_name, {inputValue: cluster_name},
--queries, {inputValue: queries},
--query_file_uri, {inputValue: query_file_uri},
--script_variables, {inputValue: script_variables},
--pig_job, {inputValue: pig_job},
--job, {inputValue: job},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
| 8,335 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/create_cluster/README.md |
# Name
Component: Data processing by creating a cluster in Cloud Dataproc
# Label
Cloud Dataproc, Kubeflow
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Other
Technique:
Other
Input data type:
Tabular
ML workflow:
Data preparation
# Summary
A Kubeflow pipeline component to create a cluster in Cloud Dataproc.
# Details
## Intended use
Use this component at the start of a Kubeflow pipeline to create a temporary Cloud Dataproc cluster to run Cloud Dataproc jobs as steps in the pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|----------|-------------|----------|-----------|-----------------|---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | | |
| region | The Cloud Dataproc region to create the cluster in. | No | GCPRegion | | |
| name | The name of the cluster. Cluster names within a project must be unique. You can reuse the names of deleted clusters. | Yes | String | | None |
| name_prefix | The prefix of the cluster name. | Yes | String | | None |
| initialization_actions | A list of Cloud Storage URIs identifying executables to run on each node after the configuration is completed. By default, executables are run on the master and all the worker nodes. | Yes | List | | None |
| config_bucket | The Cloud Storage bucket to use to stage the job dependencies, the configuration files, and the job driver console’s output. | Yes | GCSPath | | None |
| image_version | The version of the software inside the cluster. | Yes | String | | None |
| cluster | The full [cluster configuration](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster). | Yes | Dict | | None |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30 |
## Output
Name | Description | Type
:--- | :---------- | :---
cluster_name | The name of the cluster. | String
Note: You can tear down the cluster when it is no longer needed by using the [Dataproc delete cluster component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/dataproc/delete_cluster).
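In a pipeline, downstream steps can consume the `cluster_name` output. The snippet below is a minimal sketch, assuming `dataproc_create_cluster_op` and a job-submission component such as `dataproc_submit_pyspark_job_op` have already been loaded with `comp.load_component_from_url`; the project ID and bucket path are placeholders:
```python
import kfp.dsl as dsl

@dsl.pipeline(name='Create cluster then submit job')
def cluster_then_job_pipeline(project_id='my-project-id', region='us-central1'):
    create_task = dataproc_create_cluster_op(
        project_id=project_id,
        region=region,
        name_prefix='tmp-cluster')
    # Wire the generated cluster name into the job-submission step.
    dataproc_submit_pyspark_job_op(
        project_id=project_id,
        region=region,
        cluster_name=create_task.outputs['cluster_name'],
        main_python_file_uri='gs://my-bucket/jobs/hello-world.py')  # placeholder URI
```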
## Cautions & requirements
To use the component, you must:
* Set up the GCP project by following these [steps](https://cloud.google.com/dataproc/docs/guides/setup-project).
* Ensure that the component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the following types of access to the Kubeflow user service account:
* Read access to the Cloud Storage buckets which contain the initialization action files.
* The role, `roles/dataproc.editor`, on the project.
## Detailed description
This component creates a new Dataproc cluster by using the [Dataproc create cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/create).
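If the basic arguments are not enough, the full cluster configuration can be passed through the `cluster` argument as a JSON-serialized dictionary. The snippet below is only a sketch; the field names follow the Dataproc `Cluster` resource, and the project ID, machine types, and worker count are illustrative assumptions:
```python
import json

# Hypothetical cluster configuration passed through the `cluster` argument.
CLUSTER_CONFIG = json.dumps({
    'projectId': 'my-project-id',  # placeholder project
    'config': {
        'masterConfig': {'numInstances': 1, 'machineTypeUri': 'n1-standard-4'},
        'workerConfig': {'numInstances': 2, 'machineTypeUri': 'n1-standard-4'},
    },
})

# Inside a pipeline definition:
#   dataproc_create_cluster_op(project_id=PROJECT_ID, region='us-central1', cluster=CLUSTER_CONFIG)
```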
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipelines SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow Pipelines SDK:
```python
import kfp.components as comp
dataproc_create_cluster_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/create_cluster/component.yaml')
help(dataproc_create_cluster_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#### Set sample parameters
```python
# Required parameters
PROJECT_ID = '<Put your project ID here>'
# Optional parameters
EXPERIMENT_NAME = 'Dataproc - Create Cluster'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc create cluster pipeline',
description='Dataproc create cluster pipeline'
)
def dataproc_create_cluster_pipeline(
project_id = PROJECT_ID,
region = 'us-central1',
name='',
name_prefix='',
initialization_actions='',
config_bucket='',
image_version='',
cluster='',
wait_interval='30'
):
dataproc_create_cluster_op(
project_id=project_id,
region=region,
name=name,
name_prefix=name_prefix,
initialization_actions=initialization_actions,
config_bucket=config_bucket,
image_version=image_version,
cluster=cluster,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
#Compile the pipeline
pipeline_func = dataproc_create_cluster_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Kubernetes Engine for Kubeflow](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts)
* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_create_cluster.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/create_cluster/sample.ipynb)
* [Dataproc create cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/create)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,336 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/create_cluster/sample.ipynb | import kfp.components as comp
dataproc_create_cluster_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/create_cluster/component.yaml')
help(dataproc_create_cluster_op)
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
# Optional Parameters
EXPERIMENT_NAME = 'Dataproc - Create Cluster'
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc create cluster pipeline',
description='Dataproc create cluster pipeline'
)
def dataproc_create_cluster_pipeline(
project_id = PROJECT_ID,
region = 'us-central1',
name='',
name_prefix='',
initialization_actions='',
config_bucket='',
image_version='',
cluster='',
wait_interval='30'
):
dataproc_create_cluster_op(
project_id=project_id,
region=region,
name=name,
name_prefix=name_prefix,
initialization_actions=initialization_actions,
config_bucket=config_bucket,
image_version=image_version,
cluster=cluster,
        wait_interval=wait_interval)
pipeline_func = dataproc_create_cluster_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,337 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/create_cluster/component.yaml | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: dataproc_create_cluster
description: |
Creates a DataProc cluster under a project.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: >-
Required. The ID of the Google Cloud Platform project that the cluster belongs to.
type: GCPProjectID
- name: region
description: 'Required. The Cloud Dataproc region in which to handle the request.'
type: GCPRegion
- name: name
description: >-
Optional. The cluster name. Cluster names within a project must be unique. Names of
deleted clusters can be reused
default: ''
type: String
- name: name_prefix
description: 'Optional. The prefix of the cluster name.'
default: ''
type: String
- name: initialization_actions
description: >-
Optional. List of GCS URIs of executables to execute on each node after config
is completed. By default, executables are run on master and all worker nodes.
default: ''
type: List
- name: config_bucket
description: >-
Optional. A Google Cloud Storage bucket used to stage job dependencies, config
files, and job driver console output.
default: ''
type: GCSPath
- name: image_version
description: 'Optional. The version of software inside the cluster.'
default: ''
type: String
- name: cluster
description: >-
Optional. The full cluster config. See
[full details](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#Cluster)
default: ''
type: Dict
- name: wait_interval
default: '30'
description: 'Optional. The wait seconds between polling the operation. Defaults to 30.'
type: Integer
outputs:
- name: cluster_name
description: 'The cluster name of the created cluster.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, create_cluster,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
--name, {inputValue: name},
--name_prefix, {inputValue: name_prefix},
--initialization_actions, {inputValue: initialization_actions},
--config_bucket, {inputValue: config_bucket},
--image_version, {inputValue: image_version},
--cluster, {inputValue: cluster},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
cluster_name: /tmp/kfp/output/dataproc/cluster_name.txt
| 8,338 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_pyspark_job/README.md |
# Name
Component: Data preparation using PySpark on Cloud Dataproc
# Labels
Cloud Dataproc, PySpark, Kubeflow
# Summary
A Kubeflow Pipeline component to prepare data by submitting a PySpark job to Cloud Dataproc.
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Technique:
Input data type:
ML workflow:
# Details
## Intended use
Use this component to run an Apache PySpark job as one preprocessing step in a Kubeflow pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|:----------------------|:------------|:----------|:--------------|:-----------------|:---------|
| project_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to. | No | GCPProjectID | - | - |
| region | The Cloud Dataproc region to handle the request. | No | GCPRegion | - | - |
| cluster_name | The name of the cluster to run the job. | No | String | - | - |
| main_python_file_uri | The HCFS URI of the Python file to use as the driver. This must be a .py file. | No | GCSPath | - | - |
| args | The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. | Yes | List | - | None |
| pyspark_job | The payload of a [PySparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob). | Yes | Dict | - | None |
| job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | - | None |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | - | 30 |
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
* Ensure that the component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.
## Detailed description
This component creates a PySpark job from the [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
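Settings that are not exposed as top-level arguments can be supplied through the `pyspark_job` payload. The snippet below is a sketch only; the field names follow the Dataproc `PySparkJob` resource, and the dependency URI and Spark property are illustrative assumptions:
```python
import json

# Hypothetical extra settings passed through the pyspark_job payload.
PYSPARK_JOB = json.dumps({
    'pythonFileUris': ['gs://my-bucket/deps/helpers.py'],   # placeholder dependency file
    'properties': {'spark.executor.memory': '2g'},          # example Spark property
})

# Inside a pipeline definition:
#   dataproc_submit_pyspark_job_op(..., main_python_file_uri=PYSPARK_FILE_URI, pyspark_job=PYSPARK_JOB)
```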
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipelines SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow Pipelines SDK:
```python
import kfp.components as comp
dataproc_submit_pyspark_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_pyspark_job/component.yaml')
help(dataproc_submit_pyspark_job_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#### Set up a Dataproc cluster
[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
#### Prepare a PySpark job
Upload your PySpark code file to a Cloud Storage bucket. For example, this is a publicly accessible `hello-world.py` in Cloud Storage:
```python
!gsutil cat gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py
```
#### Set sample parameters
```python
PROJECT_ID = '<Put your project ID here>'
CLUSTER_NAME = '<Put your existing cluster name here>'
REGION = 'us-central1'
PYSPARK_FILE_URI = 'gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py'
ARGS = ''
EXPERIMENT_NAME = 'Dataproc - Submit PySpark Job'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit PySpark job pipeline',
description='Dataproc submit PySpark job pipeline'
)
def dataproc_submit_pyspark_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
main_python_file_uri = PYSPARK_FILE_URI,
args = ARGS,
pyspark_job='{}',
job='{}',
wait_interval='30'
):
dataproc_submit_pyspark_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
main_python_file_uri=main_python_file_uri,
args=args,
pyspark_job=pyspark_job,
job=job,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = dataproc_submit_pyspark_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster)
* [PySparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob)
* [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,339 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_pyspark_job/sample.ipynb | import kfp.components as comp
dataproc_submit_pyspark_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_pyspark_job/component.yaml')
help(dataproc_submit_pyspark_job_op)
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
PYSPARK_FILE_URI = 'gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py'
ARGS = ''
EXPERIMENT_NAME = 'Dataproc - Submit PySpark Job'
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit PySpark job pipeline',
description='Dataproc submit PySpark job pipeline'
)
def dataproc_submit_pyspark_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
main_python_file_uri = PYSPARK_FILE_URI,
args = ARGS,
pyspark_job='{}',
job='{}',
wait_interval='30'
):
dataproc_submit_pyspark_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
main_python_file_uri=main_python_file_uri,
args=args,
pyspark_job=pyspark_job,
job=job,
wait_interval=wait_interval)
pipeline_func = dataproc_submit_pyspark_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,340 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_pyspark_job/component.yaml | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: dataproc_submit_pyspark_job
description: >-
Submits a Cloud Dataproc job for running Apache PySpark applications on YARN.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: >-
Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
type: GCPProjectID
- name: region
description: >-
Required. The Cloud Dataproc region in which to handle the request.
type: GCPRegion
- name: cluster_name
description: 'Required. The cluster to run the job.'
type: String
- name: main_python_file_uri
description: >-
Required. The HCFS URI of the main Python file to
use as the driver. Must be a .py file.
type: GCSPath
- name: args
default: ''
description: >-
Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
type: List
- name: pyspark_job
default: ''
description: >-
Optional. The full payload of a
[PySparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob).
type: Dict
- name: job
default: ''
description: >-
Optional. The full payload of a
[Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
type: Dict
- name: wait_interval
default: '30'
description: >-
Optional. The wait seconds between polling the operation.
Defaults to 30.
type: Integer
outputs:
- name: job_id
description: 'The ID of the created job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_pyspark_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
--cluster_name, {inputValue: cluster_name},
--main_python_file_uri, {inputValue: main_python_file_uri},
--args, {inputValue: args},
--pyspark_job, {inputValue: pyspark_job},
--job, {inputValue: job},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
| 8,341 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_sparksql_job/README.md |
# Name
Component: Data preparation using SparkSQL on YARN with Cloud Dataproc
# Label
Cloud Dataproc, YARN, SparkSQL, Kubeflow
# Summary
A Kubeflow pipeline component to prepare data by submitting a SparkSql job on YARN to Cloud Dataproc.
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Technique:
Input data type:
ML workflow:
# Details
## Intended use
Use the component to run an Apache SparkSql job as one preprocessing step in a Kubeflow pipeline.
## Runtime arguments
Argument| Description | Optional | Data type| Accepted values| Default |
:--- | :---------- | :--- | :------- | :------ | :------
project_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to. | No| GCPProjectID | - | -|
region | The Cloud Dataproc region to handle the request. | No | GCPRegion|-|-
cluster_name | The name of the cluster to run the job. | No | String| -| -|
queries | The queries for the SparkSQL job to execute. Specify multiple queries in one string by separating them with semicolons. You do not need to terminate queries with semicolons. | Yes | List | - | None |
query_file_uri | The Hadoop Compatible Filesystem (HCFS) URI of the script that contains the SparkSQL queries, typically a script file stored in a Cloud Storage bucket. | Yes | GCSPath | - | None |
script_variables | Mapping of the query’s variable names to their values (equivalent to the SparkSQL command: SET name="value";).| Yes| Dict |- | None |
sparksql_job | The payload of a [SparkSql job](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob). | Yes | Dict | - | None |
job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | - | None |
wait_interval | The number of seconds to pause between polling the operation. | Yes |Integer | - | 30 |
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
* Ensure that the component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role, `roles/dataproc.editor`, on the project.
## Detailed Description
This component creates a SparkSql job from the [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipelines SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow Pipelines SDK:
```python
import kfp.components as comp
dataproc_submit_sparksql_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_sparksql_job/component.yaml')
help(dataproc_submit_sparksql_job_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#### Set up a Dataproc cluster
[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
#### Prepare a SparkSQL job
You can put your SparkSQL queries in the `queries` list, or you can use `query_file_uri`. In this sample, we will use a hard coded query in the `queries` list to select data from a public CSV file in Cloud Storage.
For more details about Spark SQL, see [Spark SQL, DataFrames and Datasets Guide](https://spark.apache.org/docs/latest/sql-programming-guide.html).
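You can also parameterize the queries with `script_variables`, which is equivalent to running `SET name="value";` before the queries. This is a sketch only; the variable name is an illustrative assumption, and whether the query can reference it with `${...}` depends on your Spark SQL variable-substitution settings:
```python
import json

# Hypothetical script variable (equivalent to: SET row_limit="10";).
SCRIPT_VARIABLES = json.dumps({'row_limit': '10'})

# A query could then reference it, for example:
#   SELECT * FROM natality_csv LIMIT ${row_limit};
# and the pipeline step would pass script_variables=SCRIPT_VARIABLES.
```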
#### Set sample parameters
```python
PROJECT_ID = '<Put your project ID here>'
CLUSTER_NAME = '<Put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
DROP TABLE IF EXISTS natality_csv;
CREATE EXTERNAL TABLE natality_csv (
source_year BIGINT, year BIGINT, month BIGINT, day BIGINT, wday BIGINT,
state STRING, is_male BOOLEAN, child_race BIGINT, weight_pounds FLOAT,
plurality BIGINT, apgar_1min BIGINT, apgar_5min BIGINT,
mother_residence_state STRING, mother_race BIGINT, mother_age BIGINT,
gestation_weeks BIGINT, lmp STRING, mother_married BOOLEAN,
mother_birth_state STRING, cigarette_use BOOLEAN, cigarettes_per_day BIGINT,
alcohol_use BOOLEAN, drinks_per_week BIGINT, weight_gain_pounds BIGINT,
born_alive_alive BIGINT, born_alive_dead BIGINT, born_dead BIGINT,
ever_born BIGINT, father_race BIGINT, father_age BIGINT,
record_weight BIGINT
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
LOCATION 'gs://public-datasets/natality/csv';
SELECT * FROM natality_csv LIMIT 10;'''
EXPERIMENT_NAME = 'Dataproc - Submit SparkSQL Job'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit SparkSQL job pipeline',
description='Dataproc submit SparkSQL job pipeline'
)
def dataproc_submit_sparksql_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
sparksql_job='',
job='',
wait_interval='30'
):
dataproc_submit_sparksql_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
sparksql_job=sparksql_job,
job=job,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = dataproc_submit_sparksql_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Spark SQL, DataFrames and Datasets Guide](https://spark.apache.org/docs/latest/sql-programming-guide.html)
* [SparkSqlJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
* [Cloud Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,342 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_sparksql_job/sample.ipynb | import kfp.components as comp
dataproc_submit_sparksql_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_sparksql_job/component.yaml')
help(dataproc_submit_sparksql_job_op)
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
DROP TABLE IF EXISTS natality_csv;
CREATE EXTERNAL TABLE natality_csv (
source_year BIGINT, year BIGINT, month BIGINT, day BIGINT, wday BIGINT,
state STRING, is_male BOOLEAN, child_race BIGINT, weight_pounds FLOAT,
plurality BIGINT, apgar_1min BIGINT, apgar_5min BIGINT,
mother_residence_state STRING, mother_race BIGINT, mother_age BIGINT,
gestation_weeks BIGINT, lmp STRING, mother_married BOOLEAN,
mother_birth_state STRING, cigarette_use BOOLEAN, cigarettes_per_day BIGINT,
alcohol_use BOOLEAN, drinks_per_week BIGINT, weight_gain_pounds BIGINT,
born_alive_alive BIGINT, born_alive_dead BIGINT, born_dead BIGINT,
ever_born BIGINT, father_race BIGINT, father_age BIGINT,
record_weight BIGINT
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
LOCATION 'gs://public-datasets/natality/csv';
SELECT * FROM natality_csv LIMIT 10;'''
EXPERIMENT_NAME = 'Dataproc - Submit SparkSQL Job'
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit SparkSQL job pipeline',
description='Dataproc submit SparkSQL job pipeline'
)
def dataproc_submit_sparksql_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
sparksql_job='',
job='',
wait_interval='30'
):
dataproc_submit_sparksql_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
sparksql_job=sparksql_job,
job=job,
wait_interval=wait_interval)
pipeline_func = dataproc_submit_sparksql_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,343 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_sparksql_job/component.yaml | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: dataproc_submit_sparksql_job
description: >-
Submits a Cloud Dataproc job for running Apache Spark SQL queries.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: >-
Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
type: GCPProjectID
- name: region
description: >-
Required. The Cloud Dataproc region in which to handle the request.
type: GCPRegion
- name: cluster_name
description: 'Required. The cluster to run the job.'
type: String
- name: queries
default: ''
description: >-
Required. The queries to execute. You do not need to
terminate a query with a semicolon. Multiple queries can be specified
in one string by separating each with a semicolon.
type: List
- name: query_file_uri
default: ''
description: >-
The HCFS URI of the script that contains SQL queries.
type: GCSPath
- name: script_variables
default: ''
description: >-
Optional. Mapping of query variable names to
values (equivalent to the Spark SQL command: SET name="value";).
type: Dict
- name: sparksql_job
default: ''
description: >-
Optional. The full payload of a
[SparkSqlJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob).
type: Dict
- name: job
default: ''
description: >-
Optional. The full payload of a
[Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
type: Dict
- name: wait_interval
default: '30'
description: >-
Optional. The wait seconds between polling the operation.
Defaults to 30.
type: Integer
outputs:
- name: job_id
description: 'The ID of the created job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_sparksql_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
--cluster_name, {inputValue: cluster_name},
--queries, {inputValue: queries},
--query_file_uri, {inputValue: query_file_uri},
--script_variables, {inputValue: script_variables},
--sparksql_job, {inputValue: sparksql_job},
--job, {inputValue: job},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
| 8,344 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_spark_job/README.md |
# Name
Component: Data preparation using Spark on YARN with Cloud Dataproc
# Labels
Spark, Kubeflow, YARN
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Other
Technique:
Other
Input data type:
Tabular
ML workflow:
Data preparation
# Summary
A Kubeflow pipeline component to prepare data by submitting a Spark job on YARN to Cloud Dataproc.
# Details
## Intended use
Use the component to run an Apache Spark job as one preprocessing step in a Kubeflow pipeline.
## Runtime arguments
Argument | Description | Optional | Data type | Accepted values | Default |
:--- | :---------- | :--- | :------- | :------| :------|
project_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to.|No | GCPProjectID | | |
region | The Cloud Dataproc region to handle the request. | No | GCPRegion | | |
cluster_name | The name of the cluster to run the job. | No | String | | |
main_jar_file_uri | The Hadoop Compatible Filesystem (HCFS) URI of the JAR file that contains the main class. | No | GCSPath | | |
main_class | The name of the driver's main class. The JAR file that contains the class must be either in the default CLASSPATH or specified in `spark_job.jarFileUris`. | No | String | | |
args | The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission. | Yes | List | | None |
spark_job | The payload of a [SparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob). | Yes | Dict | | None |
job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | | None |
wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30 |
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
* Ensure that the component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.
## Detailed description
This component creates a Spark job from the [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipelines SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow Pipelines SDK:
```python
import kfp.components as comp
dataproc_submit_spark_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_spark_job/component.yaml')
help(dataproc_submit_spark_job_op)
```
### Sample
Note: The following sample code works in an IPython notebook or directly in Python code.
#### Set up a Dataproc cluster
[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
#### Prepare a Spark job
Upload your Spark JAR file to a Cloud Storage bucket. In this sample, we use a JAR file that is preinstalled on the Dataproc cluster: `file:///usr/lib/spark/examples/jars/spark-examples.jar`.
Here is the [source code of the sample](https://github.com/apache/spark/blob/master/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java).
To package a self-contained Spark application, follow these [instructions](https://spark.apache.org/docs/latest/quick-start.html#self-contained-applications).
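If you package your own application, upload the JAR to Cloud Storage and point the component at it instead of the preinstalled examples JAR. The snippet below is a sketch; the bucket path is a placeholder, and a job should specify either `main_jar_file_uri` or `main_class` (with the JAR listed in `spark_job.jarFileUris`), not both:
```python
import json

# Hypothetical self-packaged Spark application in Cloud Storage.
MAIN_JAR_FILE_URI = 'gs://my-bucket/jars/my-spark-app.jar'  # placeholder JAR path

# Inside a pipeline definition:
#   dataproc_submit_spark_job_op(
#       project_id=PROJECT_ID, region=REGION, cluster_name=CLUSTER_NAME,
#       main_jar_file_uri=MAIN_JAR_FILE_URI, main_class='',
#       args=json.dumps(['1000']))
```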
#### Set sample parameters
```python
PROJECT_ID = '<Put your project ID here>'
CLUSTER_NAME = '<Put your existing cluster name here>'
REGION = 'us-central1'
SPARK_FILE_URI = 'file:///usr/lib/spark/examples/jars/spark-examples.jar'
MAIN_CLASS = 'org.apache.spark.examples.SparkPi'
ARGS = ['1000']
EXPERIMENT_NAME = 'Dataproc - Submit Spark Job'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Spark job pipeline',
description='Dataproc submit Spark job pipeline'
)
def dataproc_submit_spark_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
main_jar_file_uri = '',
main_class = MAIN_CLASS,
args = json.dumps(ARGS),
spark_job=json.dumps({ 'jarFileUris': [ SPARK_FILE_URI ] }),
job='{}',
wait_interval='30'
):
dataproc_submit_spark_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
main_jar_file_uri=main_jar_file_uri,
main_class=main_class,
args=args,
spark_job=spark_job,
job=job,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
#Compile the pipeline
pipeline_func = dataproc_submit_spark_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_submit_spark_job.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/submit_spark_job/sample.ipynb)
* [Dataproc SparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,345 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_spark_job/sample.ipynb | import kfp.components as comp
dataproc_submit_spark_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_spark_job/component.yaml')
help(dataproc_submit_spark_job_op)
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
SPARK_FILE_URI = 'file:///usr/lib/spark/examples/jars/spark-examples.jar'
MAIN_CLASS = 'org.apache.spark.examples.SparkPi'
ARGS = ['1000']
EXPERIMENT_NAME = 'Dataproc - Submit Spark Job'
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Spark job pipeline',
description='Dataproc submit Spark job pipeline'
)
def dataproc_submit_spark_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
main_jar_file_uri = '',
main_class = MAIN_CLASS,
args = json.dumps(ARGS),
spark_job=json.dumps({ 'jarFileUris': [ SPARK_FILE_URI ] }),
job='{}',
wait_interval='30'
):
dataproc_submit_spark_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
main_jar_file_uri=main_jar_file_uri,
main_class=main_class,
args=args,
spark_job=spark_job,
job=job,
wait_interval=wait_interval)
pipeline_func = dataproc_submit_spark_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,346 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_spark_job/component.yaml | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: dataproc_submit_spark_job
description: >-
Submits a Cloud Dataproc job for running Apache Spark applications on YARN.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: >-
Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
type: GCPProjectID
- name: region
description: >-
Required. The Cloud Dataproc region in which to handle the request.
type: GCPRegion
- name: cluster_name
description: 'Required. The cluster to run the job.'
type: String
- name: main_jar_file_uri
default: ''
description: >-
The HCFS URI of the jar file that contains the main class.
type: GCSPath
- name: main_class
default: ''
description: >-
The name of the driver's main class. The jar file that
contains the class must be in the default CLASSPATH or specified in
jarFileUris.
type: String
- name: args
default: ''
description: >-
Optional. The arguments to pass to the driver. Do not include
arguments, such as --conf, that can be set as job properties, since a
collision may occur that causes an incorrect job submission.
type: List
- name: spark_job
default: ''
description: >-
Optional. The full payload of a
[SparkJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob).
type: Dict
- name: job
default: ''
description: >-
Optional. The full payload of a
[Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
type: Dict
- name: wait_interval
default: '30'
description: >-
Optional. The wait seconds between polling the operation.
Defaults to 30.
type: Integer
outputs:
- name: job_id
description: 'The ID of the created job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_spark_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
--cluster_name, {inputValue: cluster_name},
--main_jar_file_uri, {inputValue: main_jar_file_uri},
--main_class, {inputValue: main_class},
--args, {inputValue: args},
--spark_job, {inputValue: spark_job},
--job, {inputValue: job},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
| 8,347 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_hive_job/README.md |
# Name
Component: Data preparation using Apache Hive on YARN with Cloud Dataproc
# Label
Cloud Dataproc, YARN, Apache Hive
# Summary
A Kubeflow pipeline component to prepare data by submitting an Apache Hive job on YARN to Cloud Dataproc.
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Technique:
Input data type:
ML workflow:
# Details
## Intended use
Use the component to run an Apache Hive job as one preprocessing step in a Kubeflow pipeline.
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|----------|-------------|----------|-----------|-----------------|---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | | |
| region | The Cloud Dataproc region to handle the request. | No | GCPRegion | | |
| cluster_name | The name of the cluster to run the job. | No | String | | |
| queries | The queries for the Hive job to execute. Specify multiple queries in one string by separating them with semicolons. You do not need to terminate queries with semicolons. | Yes | List | | None |
| query_file_uri | The Hadoop Compatible Filesystem (HCFS) URI of the script that contains the Hive queries. | Yes | GCSPath | | None |
| script_variables | Mapping of the query’s variable names to their values (equivalent to the Hive command: SET name="value";). | Yes | Dict | | None |
| hive_job | The payload of a [Hive job](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob) | Yes | Dict | | None |
| job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | | None |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30 |
## Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
* Ensure that the component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.
## Detailed description
This component creates a Hive job from the [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipelines SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow Pipelines SDK:
```python
import kfp.components as comp
dataproc_submit_hive_job_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_hive_job/component.yaml')
help(dataproc_submit_hive_job_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#### Set up a Dataproc cluster
[Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
#### Prepare a Hive query
You can put your Hive queries in the `queries` list, or you can use `query_file_uri`. In this sample, we will use a hard coded query in the `queries` list to select data from a public CSV file in Cloud Storage.
For more details, see the [Hive language manual.](https://cwiki.apache.org/confluence/display/Hive/LanguageManual)
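Alternatively, you can store the queries in a script file in Cloud Storage and reference it with `query_file_uri`. This is a sketch only; the bucket and file name are placeholders:
```python
# Hypothetical Hive script stored in Cloud Storage.
QUERY_FILE_URI = 'gs://my-bucket/queries/natality.hql'  # placeholder script path

# Inside a pipeline definition, pass the script URI instead of inline queries:
#   dataproc_submit_hive_job_op(..., queries='', query_file_uri=QUERY_FILE_URI)
```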
#### Set sample parameters
```python
PROJECT_ID = '<Put your project ID here>'
CLUSTER_NAME = '<Put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
DROP TABLE IF EXISTS natality_csv;
CREATE EXTERNAL TABLE natality_csv (
source_year BIGINT, year BIGINT, month BIGINT, day BIGINT, wday BIGINT,
state STRING, is_male BOOLEAN, child_race BIGINT, weight_pounds FLOAT,
plurality BIGINT, apgar_1min BIGINT, apgar_5min BIGINT,
mother_residence_state STRING, mother_race BIGINT, mother_age BIGINT,
gestation_weeks BIGINT, lmp STRING, mother_married BOOLEAN,
mother_birth_state STRING, cigarette_use BOOLEAN, cigarettes_per_day BIGINT,
alcohol_use BOOLEAN, drinks_per_week BIGINT, weight_gain_pounds BIGINT,
born_alive_alive BIGINT, born_alive_dead BIGINT, born_dead BIGINT,
ever_born BIGINT, father_race BIGINT, father_age BIGINT,
record_weight BIGINT
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
LOCATION 'gs://public-datasets/natality/csv';
SELECT * FROM natality_csv LIMIT 10;'''
EXPERIMENT_NAME = 'Dataproc - Submit Hive Job'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Hive job pipeline',
description='Dataproc submit Hive job pipeline'
)
def dataproc_submit_hive_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
hive_job='',
job='',
wait_interval='30'
):
dataproc_submit_hive_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
hive_job=hive_job,
job=job,
wait_interval=wait_interval)
```
#### Compile the pipeline
```python
pipeline_func = dataproc_submit_hive_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_submit_hive_job.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/submit_hive_job/sample.ipynb)
* [Dataproc HiveJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,348 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_hive_job/sample.ipynb | import kfp.components as comp
dataproc_submit_hive_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_hive_job/component.yaml')
help(dataproc_submit_hive_job_op)
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
DROP TABLE IF EXISTS natality_csv;
CREATE EXTERNAL TABLE natality_csv (
source_year BIGINT, year BIGINT, month BIGINT, day BIGINT, wday BIGINT,
state STRING, is_male BOOLEAN, child_race BIGINT, weight_pounds FLOAT,
plurality BIGINT, apgar_1min BIGINT, apgar_5min BIGINT,
mother_residence_state STRING, mother_race BIGINT, mother_age BIGINT,
gestation_weeks BIGINT, lmp STRING, mother_married BOOLEAN,
mother_birth_state STRING, cigarette_use BOOLEAN, cigarettes_per_day BIGINT,
alcohol_use BOOLEAN, drinks_per_week BIGINT, weight_gain_pounds BIGINT,
born_alive_alive BIGINT, born_alive_dead BIGINT, born_dead BIGINT,
ever_born BIGINT, father_race BIGINT, father_age BIGINT,
record_weight BIGINT
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
LOCATION 'gs://public-datasets/natality/csv';
SELECT * FROM natality_csv LIMIT 10;'''
EXPERIMENT_NAME = 'Dataproc - Submit Hive Job'
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc submit Hive job pipeline',
description='Dataproc submit Hive job pipeline'
)
def dataproc_submit_hive_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
hive_job='',
job='',
wait_interval='30'
):
dataproc_submit_hive_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
hive_job=hive_job,
job=job,
wait_interval=wait_interval)
pipeline_func = dataproc_submit_hive_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,349 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/submit_hive_job/component.yaml | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: dataproc_submit_hive_job
description: >-
Submits a Cloud Dataproc job for running Apache Hive queries on YARN.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: >-
Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
type: GCPProjectID
- name: region
description: >-
Required. The Cloud Dataproc region in which to handle the request.
type: GCPRegion
- name: cluster_name
description: 'Required. The cluster to run the job.'
type: String
- name: queries
default: ''
description: >-
Required. The queries to execute. You do not need to
terminate a query with a semicolon. Multiple queries can be specified
in one string by separating each with a semicolon.
type: List
- name: query_file_uri
default: ''
description: >-
The HCFS URI of the script that contains Hive queries.
type: GCSPath
- name: script_variables
default: ''
description: >-
Optional. Mapping of query variable names to
values (equivalent to the Hive command: SET name="value";).
type: Dict
- name: hive_job
default: ''
description: >-
Optional. The full payload of a
[HiveJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob).
type: Dict
- name: job
default: ''
description: >-
Optional. The full payload of a
[Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
type: Dict
- name: wait_interval
default: '30'
description: >-
Optional. The wait seconds between polling the operation.
Defaults to 30.
type: Integer
outputs:
- name: job_id
description: 'The ID of the created job.'
type: String
- name: MLPipeline UI metadata
type: UI metadata
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
--ui_metadata_path, {outputPath: MLPipeline UI metadata},
kfp_component.google.dataproc, submit_hive_job,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
--cluster_name, {inputValue: cluster_name},
--queries, {inputValue: queries},
--query_file_uri, {inputValue: query_file_uri},
--script_variables, {inputValue: script_variables},
--hive_job, {inputValue: hive_job},
--job, {inputValue: job},
--wait_interval, {inputValue: wait_interval},
]
env:
KFP_POD_NAME: "{{pod.name}}"
fileOutputs:
job_id: /tmp/kfp/output/dataproc/job_id.txt
| 8,350 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/delete_cluster/README.md |
# Name
Component: Data preparation by deleting a cluster in Cloud Dataproc
# Label
Cloud Dataproc, Kubeflow
# Summary
A Kubeflow pipeline component to delete a cluster in Cloud Dataproc.
## Intended use
Use this component in a Kubeflow pipeline to delete a temporary Cloud Dataproc cluster after the Cloud Dataproc jobs that run as steps in the pipeline have finished. It is usually wired to an [exit handler](https://github.com/kubeflow/pipelines/blob/master/samples/core/exit_handler/exit_handler.py) so that it runs at the end of the pipeline, as sketched below.
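A minimal sketch of that pattern follows. The project, region, cluster name, and Hive query are placeholder values, and the job step inside the handler simply reuses the `dataproc_submit_hive_job` component from this repository for illustration:
```python
import json
import kfp.dsl as dsl
import kfp.components as comp

dataproc_delete_cluster_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/delete_cluster/component.yaml')
dataproc_submit_hive_job_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/submit_hive_job/component.yaml')

@dsl.pipeline(
    name='Dataproc jobs with cluster cleanup',
    description='Runs a Hive job and always deletes the cluster at the end'
)
def dataproc_with_cleanup_pipeline(
    project_id='<Please put your project ID here>',
    region='us-central1',
    cluster_name='<Please put your existing cluster name here>'
):
    # The exit op runs when the pipeline finishes, whether the steps inside
    # the handler succeed or fail, so the cluster is always cleaned up.
    delete_op = dataproc_delete_cluster_op(
        project_id=project_id,
        region=region,
        name=cluster_name)
    with dsl.ExitHandler(delete_op):
        dataproc_submit_hive_job_op(
            project_id=project_id,
            region=region,
            cluster_name=cluster_name,
            queries=json.dumps(['SHOW DATABASES']))
```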
# Facets
<!--Make sure the asset has data for the following facets:
Use case
Technique
Input data type
ML workflow
The data must map to the acceptable values for these facets, as documented on the “taxonomy” sheet of go/aihub-facets
https://gitlab.aihub-content-external.com/aihubbot/kfp-components/commit/fe387ab46181b5d4c7425dcb8032cb43e70411c1
--->
Use case:
Technique:
Input data type:
ML workflow:
## Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|:----------|:-------------|:----------|:-----------|:-----------------|:---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | - | - |
| region | The Cloud Dataproc region in which to handle the request. | No | GCPRegion | - | - |
| name | The name of the cluster to delete. | No | String | - | - |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | - | 30 |
## Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
* The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
* Grant the Kubeflow user service account the role, `roles/dataproc.editor`, on the project.
## Detailed description
This component deletes a Dataproc cluster by using [Dataproc delete cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/delete).
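For reference, the sketch below approximates that REST call with the `google-api-python-client` discovery client. It is not the component's actual implementation, and the helper function name is made up for illustration:
```python
from googleapiclient import discovery

def delete_dataproc_cluster(project_id, region, cluster_name):
    # Dataproc v1: DELETE /v1/projects/{projectId}/regions/{region}/clusters/{clusterName}
    dataproc = discovery.build('dataproc', 'v1')
    # clusters.delete returns a long-running Operation describing the deletion.
    return dataproc.projects().regions().clusters().delete(
        projectId=project_id,
        region=region,
        clusterName=cluster_name).execute()
```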
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipelines SDK:
```python
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
```
2. Load the component using the Kubeflow Pipelines SDK:
```python
import kfp.components as comp
dataproc_delete_cluster_op = comp.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/delete_cluster/component.yaml')
help(dataproc_delete_cluster_op)
```
### Sample
The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#### Prerequisites
[Create a Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) before running the sample code.
#### Set sample parameters
```python
PROJECT_ID = '<Put your project ID here>'
CLUSTER_NAME = '<Put your existing cluster name here>'
REGION = 'us-central1'
EXPERIMENT_NAME = 'Dataproc - Delete Cluster'
```
#### Example pipeline that uses the component
```python
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc delete cluster pipeline',
description='Dataproc delete cluster pipeline'
)
def dataproc_delete_cluster_pipeline(
project_id = PROJECT_ID,
region = REGION,
name = CLUSTER_NAME
):
dataproc_delete_cluster_op(
project_id=project_id,
region=region,
name=name)
```
#### Compile the pipeline
```python
pipeline_func = dataproc_delete_cluster_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
```
#### Submit the pipeline for execution
```python
#Specify values for the pipeline's arguments
arguments = {}
#Get or create an experiment
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
```
## References
* [Component Python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_delete_cluster.py)
* [Component Docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataproc/delete_cluster/sample.ipynb)
* [Dataproc delete cluster REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters/delete)
## License
By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| 8,351 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/delete_cluster/sample.ipynb | import kfp.components as comp
dataproc_delete_cluster_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/delete_cluster/component.yaml')
help(dataproc_delete_cluster_op)
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
EXPERIMENT_NAME = 'Dataproc - Delete Cluster'
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc delete cluster pipeline',
description='Dataproc delete cluster pipeline'
)
def dataproc_delete_cluster_pipeline(
project_id = PROJECT_ID,
region = REGION,
name = CLUSTER_NAME
):
dataproc_delete_cluster_op(
project_id=project_id,
region=region,
name=name)
pipeline_func = dataproc_delete_cluster_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments) | 8,352 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc | kubeflow_public_repos/kfp-tekton-backend/components/gcp/dataproc/delete_cluster/component.yaml | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: dataproc_delete_cluster
description: |
Deletes a Cloud Dataproc cluster.
metadata:
labels:
add-pod-env: 'true'
inputs:
- name: project_id
description: >-
Required. The ID of the Google Cloud Platform project that the cluster belongs to.
type: GCPProjectID
- name: region
description: >-
Required. The Cloud Dataproc region in which to handle the request.
type: GCPRegion
- name: name
description: 'Required. The cluster name to delete.'
type: String
- name: wait_interval
default: '30'
description: 'Optional. The wait seconds between polling the operation. Defaults to 30.'
type: Integer
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:ad9bd5648dd0453005225779f25d8cebebc7ca00
args: [
kfp_component.google.dataproc, delete_cluster,
--project_id, {inputValue: project_id},
--region, {inputValue: region},
--name, {inputValue: name},
--wait_interval, {inputValue: wait_interval}
]
env:
KFP_POD_NAME: "{{pod.name}}" | 8,353 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws | kubeflow_public_repos/kfp-tekton-backend/components/aws/athena/Dockerfile | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:16.04
RUN apt-get update -y && apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools wget unzip
RUN easy_install pip
RUN pip install boto3==1.9.130 pathlib2
COPY query/src/query.py .
ENV PYTHONPATH /app
ENTRYPOINT [ "bash" ]
| 8,354 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/athena | kubeflow_public_repos/kfp-tekton-backend/components/aws/athena/query/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Athena Query
description: |
A Kubeflow Pipeline component to submit a query to Amazon Web Services Athena
service and dump outputs to AWS S3.
inputs:
- {name: region, description: 'The Athena region in which to handle the request.'}
- {name: database, description: 'The name of the database.'}
- {name: query, description: 'The SQL query statements to be executed in Athena.'}
- {name: output_path, description: 'The path to the Amazon S3 location where the query results are stored.'}
- {name: workgroup, description: 'Optional argument to provide Athena workgroup'}
outputs:
- {name: output_path, description: 'The path to the S3 bucket containing the query output in CSV format.'}
implementation:
container:
image: seedjeffwan/kubeflow-pipeline-aws-athena:20190501
command: ['python', 'query.py']
args: [
--region, {inputValue: region},
--database, {inputValue: database},
--query, {inputValue: query},
--output, {inputValue: output_path},
--workgroup, {inputValue: workgroup}
]
fileOutputs:
output_path: /output.txt
| 8,355 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/athena/query | kubeflow_public_repos/kfp-tekton-backend/components/aws/athena/query/src/query.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import time
import re
import boto3
def get_client(region=None):
"""Builds a client to the AWS Athena API."""
client = boto3.client("athena", region_name=region)
return client
def query(client, query, database, output, workgroup=None):
"""Executes an AWS Athena query."""
params = dict(
QueryString=query,
QueryExecutionContext={"Database": database},
ResultConfiguration={"OutputLocation": output,},
)
if workgroup:
params.update(dict(WorkGroup=workgroup))
response = client.start_query_execution(**params)
execution_id = response["QueryExecutionId"]
logging.info("Execution ID: %s", execution_id)
# Athena queries are asynchronous; poll the execution status and wait for it to finish
state = "RUNNING"
max_execution = (
5 # TODO: this should be an optional parameter from users. or use timeout
)
while max_execution > 0 and state in ["RUNNING"]:
max_execution = max_execution - 1
response = client.get_query_execution(QueryExecutionId=execution_id)
if (
"QueryExecution" in response
and "Status" in response["QueryExecution"]
and "State" in response["QueryExecution"]["Status"]
):
state = response["QueryExecution"]["Status"]["State"]
if state == "FAILED":
raise Exception("Athena Query Failed")
elif state == "SUCCEEDED":
s3_path = response["QueryExecution"]["ResultConfiguration"][
"OutputLocation"
]
# could be multiple files?
filename = re.findall(".*\/(.*)", s3_path)[0]
logging.info("S3 output file name %s", filename)
break
time.sleep(5)
# TODO:(@Jeffwan) Add more details.
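# Summarize the execution: bytes scanned (from Statistics) and the CSV result file written to S3.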
result = {
"total_bytes_processed": response["QueryExecution"]["Statistics"][
"DataScannedInBytes"
],
"filename": filename,
}
return result
def main():
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--region", type=str, help="Athena region.")
parser.add_argument(
"--database", type=str, required=True, help="The name of the database."
)
parser.add_argument(
"--query",
type=str,
required=True,
help="The SQL query statements to be executed in Athena.",
)
parser.add_argument(
"--output",
type=str,
required=False,
help="The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/",
)
parser.add_argument(
"--workgroup",
type=str,
required=False,
help="Optional argument to provide Athena workgroup",
)
args = parser.parse_args()
client = get_client(args.region)
results = query(client, args.query, args.database, args.output, args.workgroup)
results["output"] = args.output
logging.info("Athena results: %s", results)
with open("/output.txt", "w+") as f:
json.dump(results, f)
if __name__ == "__main__":
main()
| 8,356 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/Dockerfile | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:16.04
RUN apt-get update -y && apt-get install --no-install-recommends -y -q ca-certificates python-dev python-setuptools wget unzip
RUN easy_install pip
RUN pip install boto3==1.9.130 pathlib2
COPY create_cluster/src/create_cluster.py .
COPY delete_cluster/src/delete_cluster.py .
COPY submit_pyspark_job/src/submit_pyspark_job.py .
COPY submit_spark_job/src/submit_spark_job.py .
COPY common /app/common/
ENV PYTHONPATH /app
ENTRYPOINT [ "bash" ]
| 8,357 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/common/_utils.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import subprocess
import time
import boto3
from botocore.exceptions import ClientError
import json
def get_client(region=None):
"""Builds a client to the AWS EMR API."""
client = boto3.client('emr', region_name=region)
return client
def create_cluster(client, cluster_name, log_s3_uri, release_label, instance_type, instance_count):
"""Create a EMR cluster."""
response = client.run_job_flow(
Name=cluster_name,
LogUri=log_s3_uri,
ReleaseLabel=release_label,
Applications=[
{
'Name': 'Spark'
}
],
BootstrapActions=[
{
'Name': 'Maximize Spark Default Config',
'ScriptBootstrapAction': {
'Path': 's3://support.elasticmapreduce/spark/maximize-spark-default-config',
}
},
],
Instances= {
'MasterInstanceType': instance_type,
'SlaveInstanceType': instance_type,
'InstanceCount': instance_count,
'KeepJobFlowAliveWhenNoSteps':True,
'TerminationProtected':False,
},
VisibleToAllUsers=True,
JobFlowRole='EMR_EC2_DefaultRole',
ServiceRole='EMR_DefaultRole'
)
return response
def delete_cluster(client, jobflow_id):
"""Delete a EMR cluster. Cluster shutdowns in background"""
client.terminate_job_flows(JobFlowIds=[jobflow_id])
def wait_for_cluster(client, jobflow_id):
"""Waiting for a new cluster to be ready."""
while True:
response = client.describe_cluster(ClusterId=jobflow_id)
cluster_status = response['Cluster']['Status']
state = cluster_status['State']
if 'Message' in cluster_status['StateChangeReason']:
message = cluster_status['StateChangeReason']['Message']
if state in ['TERMINATED', 'TERMINATED_WITH_ERRORS']:
raise Exception(message)
if state == 'WAITING':
print('EMR cluster create completed')
break
print("Cluster state: {}, wait 15s for cluster to start up.".format(state))
time.sleep(15)
# Check following documentation to add other job type steps. Seems python SDK only have 'HadoopJarStep' here.
# https://docs.aws.amazon.com/cli/latest/reference/emr/add-steps.html
def submit_spark_job(client, jobflow_id, job_name, jar_path, main_class, extra_args):
"""Submits single spark job to a running cluster"""
spark_job = {
'Name': job_name,
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar'
}
}
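# Build the spark-submit command line that command-runner.jar executes on the cluster.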
spark_args = ['spark-submit', "--deploy-mode", "cluster"]
if main_class:
spark_args.extend(['--class', main_class])
spark_args.extend([jar_path])
spark_args.extend(extra_args)
spark_job['HadoopJarStep']['Args'] = spark_args
try:
response = client.add_job_flow_steps(
JobFlowId=jobflow_id,
Steps=[spark_job],
)
except ClientError as e:
print(e.response['Error']['Message'])
exit(1)
step_id = response['StepIds'][0]
print("Step Id {} has been submitted".format(step_id))
return step_id
def wait_for_job(client, jobflow_id, step_id):
"""Waiting for a cluster step by polling it."""
while True:
result = client.describe_step(ClusterId=jobflow_id, StepId=step_id)
step_status = result['Step']['Status']
state = step_status['State']
if state in ('CANCELLED', 'FAILED', 'INTERRUPTED'):
err_msg = 'UNKNOWN'
if 'FailureDetails' in step_status:
err_msg = step_status['FailureDetails']
raise Exception(err_msg)
elif state == 'COMPLETED':
print('EMR Step finishes')
break
print("Step state: {}, wait 15s for step status update.".format(state))
time.sleep(10)
def submit_pyspark_job(client, jobflow_id, job_name, py_file, extra_args):
"""Submits single spark job to a running cluster"""
pyspark_args = ['spark-submit', py_file]
pyspark_args.extend(extra_args)
return submit_spark_job(client, jobflow_id, job_name, 'command-runner.jar', '', pyspark_args)
| 8,358 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/create_cluster/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: emr_create_cluster
description: |
Creates an Elastic Map Reduce (EMR) cluster in a specific region.
inputs:
- {name: region, description: 'The EMR region in which to handle the request.'}
- {name: name, description: 'The EMR cluster name. Cluster names within a region must be unique. Names of deleted clusters can be reused'}
- {name: release_label, description: 'The EMR version.', default: 'emr-5.23.0'}
- {name: log_s3_uri, description: 'The path to the Amazon S3 location where logs for this cluster are stored.'}
- {name: instance_type, description: 'The EC2 instance type of the master, core, and task nodes.', default: 'm4.xlarge'}
- {name: instance_count, description: 'The number of EC2 instances in the cluster.', default: '3'}
outputs:
- {name: cluster_name, description: 'The cluster name of the created cluster.'}
implementation:
container:
image: seedjeffwan/kubeflow-pipeline-aws-emr:20190507
command: ['python', 'create_cluster.py']
args: [
--region, {inputValue: region},
--name, {inputValue: name},
--release_label, {inputValue: release_label},
--log_s3_uri, {inputValue: log_s3_uri},
--instance_type, {inputValue: instance_type},
--instance_count, {inputValue: instance_count}
]
fileOutputs:
cluster_name: /output.txt | 8,359 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/create_cluster | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/create_cluster/src/create_cluster.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import logging
from pathlib2 import Path
from common import _utils
try:
unicode
except NameError:
unicode = str
def main(argv=None):
parser = argparse.ArgumentParser(description='Create EMR Cluster')
parser.add_argument('--region', type=str, help='EMR Cluster region.')
parser.add_argument('--name', type=str, help='The name of the cluster to create.')
parser.add_argument('--release_label', type=str, default="emr-5.23.0" ,help='The Amazon EMR release label, which determines the version of open-source application packages installed on the cluster.')
parser.add_argument('--log_s3_uri', type=str, help='The path to the Amazon S3 location where logs for this cluster are stored.')
parser.add_argument('--instance_type', type=str, default="m4.xlarge", help='The EC2 instance type of the master, core, and task nodes.')
parser.add_argument('--instance_count', type=int, default=3, help='The number of EC2 instances in the cluster.')
parser.add_argument('--output_location_file', type=str, help='File path where the program will write the Amazon S3 URI of the transform job results.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_client(args.region)
logging.info('Creating cluster...')
create_response = _utils.create_cluster(client, args.name, args.log_s3_uri, args.release_label, args.instance_type, args.instance_count)
logging.info('Cluster creation request submitted. Waiting for completion...')
_utils.wait_for_cluster(client, create_response['JobFlowId'])
Path('/output.txt').write_text(unicode(create_response['JobFlowId']))
logging.info('Cluster created.')
if __name__== "__main__":
main()
| 8,360 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/submit_pyspark_job/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: emr_submit_pyspark_job
description: >-
Submits an Elastic Map Reduce (EMR) PySpark application.
inputs:
- {name: region, description: 'The EMR region in which to handle the request.'}
- {name: jobflow_id, description: 'The cluster id to run the job.'}
- {name: job_name, description: 'The name of the spark job.'}
- {name: py_file, description: 'A path to a pyspark file run during the step.'}
- {name: input, description: 'File path of the dataset.'}
- {name: output, description: 'Output path of the result files.'}
outputs:
- {name: job_id, description: 'The id of the created job.'}
- {name: output_location, description: 'S3 URI of the training job results.'}
implementation:
container:
image: seedjeffwan/kubeflow-pipeline-aws-emr:20190507
command: ['python', 'submit_pyspark_job.py']
args: [
--region, {inputValue: region},
--jobflow_id, {inputValue: jobflow_id},
--job_name, {inputValue: job_name},
--py_file, {inputValue: py_file},
--input, {inputValue: input},
--output, {inputValue: output},
--output_file, {outputPath: output_location},
]
fileOutputs:
job_id: /output.txt | 8,361 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/submit_pyspark_job | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/submit_pyspark_job/src/submit_pyspark_job.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A program to submit a PySpark job to an existing EMR cluster.
# Usage:
# python submit_pyspark_job.py \
# --region us-west-2 \
# --jobflow_id j-xsdsadsadsa \
# --job_name training_job \
# --py_file s3://kubeflow-pipeline/jars/py_workcount.py \
# --input s3://kubeflow-pipeline/datasets/words.txt \
# --output s3://kubeflow-pipeline/datasets/output/
import argparse
import logging
import random
from datetime import datetime
from pathlib2 import Path
from common import _utils
try:
unicode
except NameError:
unicode = str
def main(argv=None):
parser = argparse.ArgumentParser(description='Submit PySpark Job')
parser.add_argument('--region', type=str, help='The region where the cluster launches.')
parser.add_argument('--jobflow_id', type=str, help='The name of the cluster to run job.')
parser.add_argument('--job_name', type=str, help='The name of spark job.')
parser.add_argument('--py_file', type=str, help='A path to a pyspark file run during the step')
parser.add_argument('--input', type=str, help='File path of the dataset.')
parser.add_argument('--output', type=str, help='Output path of the result files.')
parser.add_argument('--output_file', type=str, help='S3 URI of the training job results.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_client(args.region)
logging.info('Submitting job to %s...', args.jobflow_id)
spark_args = [args.input, args.output]
step_id = _utils.submit_pyspark_job(
client, args.jobflow_id, args.job_name, args.py_file, spark_args)
logging.info('Job request submitted. Waiting for completion...')
_utils.wait_for_job(client, args.jobflow_id, step_id)
Path('/output.txt').write_text(unicode(step_id))
Path(args.output_file).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_file).write_text(unicode(args.output))
logging.info('Job completed.')
if __name__== "__main__":
main()
| 8,362 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/submit_spark_job/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: emr_submit_spark_job
description: >-
Submits an Elastic Map Reduce (EMR) Spark application.
inputs:
- {name: region, description: 'The EMR region in which to handle the request.'}
- {name: jobflow_id, description: 'The cluster id to run the job.'}
- {name: job_name, description: The name of the spark job.}
- {name: jar_path, description: 'A path to a JAR file run during the step.'}
- {name: main_class, default: '', description: 'The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.'}
- {name: input, description: 'File path of the dataset.'}
- {name: output, description: 'Output path of the result files.'}
outputs:
- {name: job_id, description: 'The id of the created EMR step.'}
- {name: output_location, description: 'S3 URI of the training job results.'}
implementation:
container:
image: seedjeffwan/kubeflow-pipeline-aws-emr:20190507
command: ['python', 'submit_spark_job.py']
args: [
--region, {inputValue: region},
--jobflow_id, {inputValue: jobflow_id},
--job_name, {inputValue: job_name},
--jar_path, {inputValue: jar_path},
--main_class, {inputValue: main_class},
--input, {inputValue: input},
--output, {inputValue: output},
--output_file, {outputPath: output_location},
]
fileOutputs:
job_id: /output.txt | 8,363 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/submit_spark_job | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/submit_spark_job/src/submit_spark_job.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A program to submit a Spark job to an existing EMR cluster.
# Usage:
# python submit_spark_job.py \
# --region us-west-2 \
# --jobflow_id j-xsdsadsadsa \
# --job_name training_job \
# --jar_path s3://kubeflow-pipeline/jars/spark-examples_2.11-2.4.1.jar \
# --main_class org.apache.spark.examples.JavaWordCount \
# --input s3://kubeflow-pipeline/datasets/words.txt \
# --output ''
import argparse
import logging
import random
from datetime import datetime
from pathlib2 import Path
from common import _utils
try:
unicode
except NameError:
unicode = str
def main(argv=None):
parser = argparse.ArgumentParser(description='Submit Spark Job')
parser.add_argument('--region', type=str, help='The region where the cluster launches.')
parser.add_argument('--jobflow_id', type=str, help='The name of the cluster to run job.')
parser.add_argument('--job_name', type=str, help='The name of spark job.')
parser.add_argument('--jar_path', type=str, help='A path to a JAR file run during the step')
parser.add_argument('--main_class', type=str, default=None,
help='The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.')
parser.add_argument('--input', type=str, help='File path of the dataset.')
parser.add_argument('--output', type=str, help='Output path of the result files')
parser.add_argument('--output_file', type=str, help='S3 URI of the training job results.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_client(args.region)
logging.info('Submitting job...')
spark_args = [args.input, args.output]
step_id = _utils.submit_spark_job(
client, args.jobflow_id, args.job_name, args.jar_path, args.main_class, spark_args)
logging.info('Job request submitted. Waiting for completion...')
_utils.wait_for_job(client, args.jobflow_id, step_id)
Path('/output.txt').write_text(unicode(step_id))
Path(args.output_file).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_file).write_text(unicode(args.output))
logging.info('Job completed.')
if __name__== "__main__":
main()
| 8,364 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/delete_cluster/component.yaml | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: emr_delete_cluster
description: |
Deletes an Elastic Map Reduce (EMR) cluster.
inputs:
- {name: region, description: 'The EMR region in which to handle the request.'}
- {name: jobflow_id, description: 'The cluster id to delete.'}
- {name: dependent, description: 'A dependency used to defer EMR cluster termination. This is only used to generate the DAG.'}
implementation:
container:
image: seedjeffwan/kubeflow-pipeline-aws-emr:20190507
command: ['python', 'delete_cluster.py']
args: [
--region, {inputValue: region},
--jobflow_id, {inputValue: jobflow_id},
--job_id, {inputValue: dependent}
] | 8,365 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/delete_cluster | kubeflow_public_repos/kfp-tekton-backend/components/aws/emr/delete_cluster/src/delete_cluster.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='Shutdown EMR cluster')
parser.add_argument('--region', type=str, help='The region where the cluster launches.')
parser.add_argument('--jobflow_id', type=str, help='The ID of the job flow (cluster) to shut down.')
parser.add_argument('--job_id', type=str, help='Job id before cluster termination.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_client(args.region)
logging.info('Tearing down cluster...')
_utils.delete_cluster(client, args.jobflow_id)
logging.info('Cluster deletion request submitted. Cluster will be shut down in the background')
if __name__== "__main__":
main()
| 8,366 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/LICENSE.txt |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 8,367 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/THIRD-PARTY-LICENSES.txt | ** Amazon SageMaker Components for Kubeflow Pipelines; version 0.3.1 --
https://github.com/kubeflow/pipelines/tree/master/components/aws/sagemaker
Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
** boto3; version 1.12.33 -- https://github.com/boto/boto3/
Copyright 2013-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
** botocore; version 1.15.33 -- https://github.com/boto/botocore
Botocore
Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
** Importlib-metadata; version 1.6.0 --
https://importlib-metadata.readthedocs.io/en/latest/
© Copyright 2017-2019, Jason R. Coombs, Barry Warsaw
** s3transfer; version 0.3.3 -- https://github.com/boto/s3transfer/
s3transfer
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
** sagemaker; version 1.54.0 -- https://aws.amazon.com/sagemaker/
Amazon SageMaker Python SDK
Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
** smdebug-rulesconfig; version 0.1.2 --
https://github.com/awslabs/sagemaker-debugger-rulesconfig
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND
DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the
copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other
entities that control, are controlled by, or are under common control
with that entity. For the purposes of this definition, "control" means
(i) the power, direct or indirect, to cause the direction or management
of such entity, whether by contract or otherwise, or (ii) ownership of
fifty percent (50%) or more of the outstanding shares, or (iii)
beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation source,
and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but not limited
to compiled object code, generated documentation, and conversions to
other media types.
"Work" shall mean the work of authorship, whether in Source or Object
form, made available under the License, as indicated by a copyright
notice that is included in or attached to the work (an example is
provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form,
that is based on (or derived from) the Work and for which the editorial
revisions, annotations, elaborations, or other modifications represent,
as a whole, an original work of authorship. For the purposes of this
License, Derivative Works shall not include works that remain separable
from, or merely link (or bind by name) to the interfaces of, the Work and
Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original
version of the Work and any modifications or additions to that Work or
Derivative Works thereof, that is intentionally submitted to Licensor for
inclusion in the Work by the copyright owner or by an individual or Legal
Entity authorized to submit on behalf of the copyright owner. For the
purposes of this definition, "submitted" means any form of electronic,
verbal, or written communication sent to the Licensor or its
representatives, including but not limited to communication on electronic
mailing lists, source code control systems, and issue tracking systems
that are managed by, or on behalf of, the Licensor for the purpose of
discussing and improving the Work, but excluding communication that is
conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on
behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this
License, each Contributor hereby grants to You a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable copyright license to
reproduce, prepare Derivative Works of, publicly display, publicly perform,
sublicense, and distribute the Work and such Derivative Works in Source or
Object form.
3. Grant of Patent License. Subject to the terms and conditions of this
License, each Contributor hereby grants to You a perpetual, worldwide,
non-exclusive, no-charge, royalty-free, irrevocable (except as stated in
this section) patent license to make, have made, use, offer to sell, sell,
import, and otherwise transfer the Work, where such license applies only to
those patent claims licensable by such Contributor that are necessarily
infringed by their Contribution(s) alone or by combination of their
Contribution(s) with the Work to which such Contribution(s) was submitted.
If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this
License for that Work shall terminate as of the date such litigation is
filed.
4. Redistribution. You may reproduce and distribute copies of the Work or
Derivative Works thereof in any medium, with or without modifications, and
in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a
copy of this License; and
(b) You must cause any modified files to carry prominent notices stating
that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You
distribute, all copyright, patent, trademark, and attribution notices
from the Source form of the Work, excluding those notices that do not
pertain to any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must include
a readable copy of the attribution notices contained within such NOTICE
file, excluding those notices that do not pertain to any part of the
Derivative Works, in at least one of the following places: within a
NOTICE text file distributed as part of the Derivative Works; within the
Source form or documentation, if provided along with the Derivative
Works; or, within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents of the
NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works
that You distribute, alongside or as an addendum to the NOTICE text from
the Work, provided that such additional attribution notices cannot be
construed as modifying the License.
You may add Your own copyright statement to Your modifications and may
provide additional or different license terms and conditions for use,
reproduction, or distribution of Your modifications, or for any such
Derivative Works as a whole, provided Your use, reproduction, and
distribution of the Work otherwise complies with the conditions stated in
this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any
Contribution intentionally submitted for inclusion in the Work by You to the
Licensor shall be under the terms and conditions of this License, without
any additional terms or conditions. Notwithstanding the above, nothing
herein shall supersede or modify the terms of any separate license agreement
you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor, except
as required for reasonable and customary use in describing the origin of the
Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in
writing, Licensor provides the Work (and each Contributor provides its
Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied, including, without limitation, any
warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or
FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining
the appropriateness of using or redistributing the Work and assume any risks
associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether
in tort (including negligence), contract, or otherwise, unless required by
applicable law (such as deliberate and grossly negligent acts) or agreed to
in writing, shall any Contributor be liable to You for damages, including
any direct, indirect, special, incidental, or consequential damages of any
character arising as a result of this License or out of the use or inability
to use the Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all other
commercial damages or losses), even if such Contributor has been advised of
the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work
or Derivative Works thereof, You may choose to offer, and charge a fee for,
acceptance of support, warranty, indemnity, or other liability obligations
and/or rights consistent with this License. However, in accepting such
obligations, You may act only on Your own behalf and on Your sole
responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any
liability incurred by, or claims asserted against, such Contributor by
reason of your accepting any such warranty or additional liability. END OF
TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification
within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
* For Amazon SageMaker Components for Kubeflow Pipelines see also this required
NOTICE:
Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights
Reserved.
* For boto3 see also this required NOTICE:
Copyright 2013-2019 Amazon.com, Inc. or its affiliates. All Rights
Reserved.
* For botocore see also this required NOTICE:
Botocore
Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights
Reserved.
* For Importlib-metadata see also this required NOTICE:
© Copyright 2017-2019, Jason R. Coombs, Barry Warsaw
* For s3transfer see also this required NOTICE:
s3transfer
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
* For sagemaker see also this required NOTICE:
Amazon SageMaker Python SDK
Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights
Reserved.
* For smdebug-rulesconfig see also this required NOTICE:
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
------
** docutils; version 0.15.2 -- https://docutils.sourceforge.io/
Copyright: David Goodger
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------
** mock; version 1.3.0 -- https://github.com/testing-cabal/mock
Copyright (c) 2003-2013, Michael Foord & the mock team
All rights reserved.
Copyright (c) 2003-2013, Michael Foord & the mock team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------
** scandir; version 1.10.0 -- https://github.com/benhoyt/scandir
Copyright (c) 2012, Ben Hoyt
Copyright (c) 2012, Ben Hoyt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Ben Hoyt nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------
** enum34; version 1.1.10 -- https://pypi.python.org/pypi/enum34/1.1.6
Copyright (c) 2013, Ethan Furman.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
Neither the name Ethan Furman nor the names of any
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
------
** protobuf; version 3.11.3 -- https://github.com/protocolbuffers/protobuf
Copyright 2008 Google Inc. All rights reserved.
Copyright 2008 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Code generated by the Protocol Buffer compiler is owned by the owner
of the input file used when generating it. This code is not
standalone and requires a support library to be linked with it. This
support library is itself covered by the above license.
------
** numpy; version 1.16.6 -- https://numpy.org/
Copyright 2020 NumPy developers.
** packaging; version 20.3 -- https://github.com/pypa/packaging
Copyright (c) Donald Stufft and individual contributors.
** python-dateutil; version 2.8.1 -- https://dateutil.readthedocs.io/en/stable/
© Copyright 2019, dateutil
** scipy; version 1.2.3 -- https://www.scipy.org/
Copyright © 2001, 2002 Enthought, Inc.
All rights reserved.
Copyright © 2003-2019 SciPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
------
** six; version 1.14.0 -- https://github.com/benjaminp/six
copyright u'2010-2015, Benjamin Peterson
Copyright (c) 2010-2015 Benjamin Peterson
Copyright (c) 2010-2018 Benjamin Peterson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------
** pyyaml; version 3.12 -- http://pyyaml.org/
Copyright (c) 2020 Ingy döt Net
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------
** configparser; version 4.0.2 -- https://github.com/jaraco/configparser/
Copyright Jason R. Coombs
** zipp; version 1.2.0 -- https://github.com/jaraco/zipp
Copyright Jason R. Coombs
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
------
** pathlib2; version 2.3.5 -- https://pypi.org/project/pathlib2/
Copyright (c) 2014-2017 Matthias C. M. Troffaes Copyright (c) 2012-2014 Antoine
Pitrou and contributors
The MIT License (MIT)
Copyright (c) 2014-2017 Matthias C. M. Troffaes Copyright (c) 2012-2014 Antoine
Pitrou and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------
** jmespath; version 0.9.5 -- https://pypi.org/project/jmespath/
Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, dis-
tribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the fol-
lowing conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
------
** pyparsing; version 2.4.6 -- https://github.com/pyparsing/pyparsing
Paul McGuire
** wheel; version 0.24.0 -- https://github.com/pypa/wheel
"wheel" copyright (c) 2012-2014 Daniel Holth <[email protected]> and
contributors.
MIT License
Copyright (c) <year> <copyright holders>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------
** python-urllib3; version 1.25.8 -- https://urllib3.readthedocs.io/
Copyright 2008-2016 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
This is the MIT license: http://www.opensource.org/licenses/mit-license.php
Copyright 2008-2016 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------
** futures; version 3.3.0 -- https://github.com/agronholm/pythonfutures
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"),
and the Individual or Organization ("Licensee") accessing and otherwise
using this software ("Python") in source or binary form and its associated
documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to
reproduce, analyze, test, perform and/or display publicly, prepare
derivative works, distribute, and otherwise use Python alone or in any
derivative version, provided, however, that PSF's License Agreement and
PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004,
2005, 2006 Python Software Foundation; All Rights Reserved" are retained in
Python alone or in any derivative version prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates Python or any part thereof, and wants to make the derivative
work available to others as provided herein, then Licensee hereby agrees to
include in any such work a brief summary of the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES
NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT
NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF
MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF
PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY
INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE
THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote products
or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee agrees to be
bound by the terms and conditions of this License Agreement. BEOPEN.COM
LICENSE AGREEMENT FOR PYTHON 2.0
BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an office
at 160 Saratoga Avenue, Santa Clara, CA 95051, and the Individual or
Organization ("Licensee") accessing and otherwise using this software in
source or binary form and its associated documentation ("the Software").
2. Subject to the terms and conditions of this BeOpen Python License
Agreement, BeOpen hereby grants Licensee a non-exclusive, royalty-free,
world-wide license to reproduce, analyze, test, perform and/or display
publicly, prepare derivative works, distribute, and otherwise use the
Software alone or in any derivative version, provided, however, that the
BeOpen Python License is retained in the Software, alone or in any
derivative version prepared by Licensee.
3. BeOpen is making the Software available to Licensee on an "AS IS" basis.
BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND DISCLAIMS ANY
REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR
PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY THIRD PARTY
RIGHTS.
4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE SOFTWARE
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY DERIVATIVE THEREOF,
EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
5. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
6. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of California, excluding conflict of law
provisions. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between BeOpen and
Licensee. This License Agreement does not grant permission to use BeOpen
trademarks or trade names in a trademark sense to endorse or promote
products or services of Licensee, or any third party. As an exception, the
"BeOpen Python" logos available at http://www.pythonlabs.com/logos.html may
be used according to the permissions granted on that web page.
7. By copying, installing or otherwise using the software, Licensee agrees
to be bound by the terms and conditions of this License Agreement. CNRI OPEN
SOURCE LICENSE AGREEMENT (for Python 1.6b1) IMPORTANT: PLEASE READ THE
FOLLOWING AGREEMENT CAREFULLY.
BY CLICKING ON "ACCEPT" WHERE INDICATED BELOW, OR BY COPYING, INSTALLING OR
OTHERWISE USING PYTHON 1.6, beta 1 SOFTWARE, YOU ARE DEEMED TO HAVE AGREED TO
THE TERMS AND CONDITIONS OF THIS LICENSE AGREEMENT.
1. This LICENSE AGREEMENT is between the Corporation for National Research
Initiatives, having an office at 1895 Preston White Drive, Reston, VA 20191
("CNRI"), and the Individual or Organization ("Licensee") accessing and
otherwise using Python 1.6, beta 1 software in source or binary form and its
associated documentation, as released at the www.python.org Internet site on
August 4, 2000 ("Python 1.6b1").
2. Subject to the terms and conditions of this License Agreement, CNRI
hereby grants Licensee a non-exclusive, royalty-free, world-wide license to
reproduce, analyze, test, perform and/or display publicly, prepare
derivative works, distribute, and otherwise use Python 1.6b1 alone or in any
derivative version, provided, however, that CNRI's License Agreement is
retained in Python 1.6b1, alone or in any derivative version prepared by
Licensee.
Alternately, in lieu of CNRI's License Agreement, Licensee may substitute the
following text (omitting the quotes): "Python 1.6, beta 1, is made available
subject to the terms and conditions in CNRI's License Agreement. This
Agreement may be located on the Internet using the following unique,
persistent identifier (known as a handle): 1895.22/1011. This Agreement may
also be obtained from a proxy server on the Internet using the
URL:http://hdl.handle.net/1895.22/1011".
3. In the event Licensee prepares a derivative work that is based on or
incorporates Python 1.6b1 or any part thereof, and wants to make the
derivative work available to the public as provided herein, then Licensee
hereby agrees to indicate in any such work the nature of the modifications
made to Python 1.6b1.
4. CNRI is making Python 1.6b1 available to Licensee on an "AS IS" basis.
CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND DISCLAIMS ANY REPRESENTATION
OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT
THE USE OF PYTHON 1.6b1 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE SOFTWARE
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
USING, MODIFYING OR DISTRIBUTING PYTHON 1.6b1, OR ANY DERIVATIVE THEREOF,
EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of Virginia, excluding conflict of law
provisions. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between CNRI and
Licensee. This License Agreement does not grant permission to use CNRI
trademarks or trade name in a trademark sense to endorse or promote products
or services of Licensee, or any third party.
8. By clicking on the "ACCEPT" button where indicated, or by copying,
installing or otherwise using Python 1.6b1, Licensee agrees to be bound by
the terms and conditions of this License Agreement. ACCEPT CWI LICENSE
AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The
Netherlands. All rights reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted, provided that
the above copyright notice appear in all copies and that both that copyright
notice and this permission notice appear in supporting documentation, and that
the name of Stichting Mathematisch Centrum or CWI not be used in advertising or
publicity pertaining to distribution of the software without specific, written
prior permission.
STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
------
** contextlib2; version 0.6.0 -- https://contextlib2.readthedocs.io/en/stable/
This is a Jazzband project. By contributing you agree to abide by the
Contributor Code of Conduct and follow the guidelines.
A. HISTORY OF THE SOFTWARE
==========================
contextlib2 is a derivative of the contextlib module distributed by the PSF
as part of the Python standard library. Accordingly, it is itself redistributed
under the PSF license (reproduced in full below). As the contextlib module
was added only in Python 2.5, the licenses for earlier Python versions are
not applicable and have not been included.
Python was created in the early 1990s by Guido van Rossum at Stichting
Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
as a successor of a language called ABC. Guido remains Python's
principal author, although it includes many contributions from others.
In 1995, Guido continued his work on Python at the Corporation for
National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
in Reston, Virginia where he released several versions of the
software.
In May 2000, Guido and the Python core development team moved to
BeOpen.com to form the BeOpen PythonLabs team. In October of the same
year, the PythonLabs team moved to Digital Creations (now Zope
Corporation, see http://www.zope.com). In 2001, the Python Software
Foundation (PSF, see http://www.python.org/psf/) was formed, a
non-profit organization created specifically to own Python-related
Intellectual Property. Zope Corporation is a sponsoring member of
the PSF.
All Python releases are Open Source (see http://www.opensource.org for
the Open Source Definition). Historically, most, but not all, Python
releases have also been GPL-compatible; the table below summarizes
the various releases that included the contextlib module.
Release Derived Year Owner GPL-
from compatible? (1)
2.5 2.4 2006 PSF yes
2.5.1 2.5 2007 PSF yes
2.5.2 2.5.1 2008 PSF yes
2.5.3 2.5.2 2008 PSF yes
2.6 2.5 2008 PSF yes
2.6.1 2.6 2008 PSF yes
2.6.2 2.6.1 2009 PSF yes
2.6.3 2.6.2 2009 PSF yes
2.6.4 2.6.3 2009 PSF yes
2.6.5 2.6.4 2010 PSF yes
3.0 2.6 2008 PSF yes
3.0.1 3.0 2009 PSF yes
3.1 3.0.1 2009 PSF yes
3.1.1 3.1 2009 PSF yes
3.1.2 3.1.1 2010 PSF yes
3.1.3 3.1.2 2010 PSF yes
3.1.4 3.1.3 2011 PSF yes
3.2 3.1 2011 PSF yes
3.2.1 3.2 2011 PSF yes
3.2.2 3.2.1 2011 PSF yes
3.3 3.2 2012 PSF yes
Footnotes:
(1) GPL-compatible doesn't mean that we're distributing Python under
the GPL. All Python licenses, unlike the GPL, let you distribute
a modified version without making your changes open source. The
GPL-compatible licenses make it possible to combine Python with
other software that is released under the GPL; the others don't.
Thanks to the many outside volunteers who have worked under Guido's
direction to make these releases possible.
B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
2010,
2011 Python Software Foundation; All Rights Reserved" are retained in Python
alone or in any derivative version prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
------
** protobuf3-to-dict; version 0.1.5 --
https://github.com/kaporzhu/protobuf-to-dict
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain.
This is free and unencumbered software released into the public domain
by its author, Ben Hodgson <[email protected]>.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognise copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/> | 8,368 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/Changelog.md | # Change log for AWS SageMaker Components
The version of the AWS SageMaker Components is determined by the docker image tag used in YAML spec
Repository: https://hub.docker.com/repository/docker/amazon/aws-sagemaker-kfp-components
---------------------------------------------
**Change log for version 0.3.0**
- Remove the data_location parameters from all components
  (use the "channels" parameter instead)
> Pull requests : [#3518](https://github.com/kubeflow/pipelines/pull/3518)
**Change log for version 0.2.0 (Apr 14, 2020)**
- Fix bug in Ground Truth component
- Add user agent header to boto3 client
> Pull requests: [#3474](https://github.com/kubeflow/pipelines/pull/3474), [#3487](https://github.com/kubeflow/pipelines/pull/3487)
---------------------------------------------
## Old
These are the old images which were in https://hub.docker.com/r/redbackthomson/aws-kubeflow-sagemaker/tags
**Change log 20200402**
- Fix for VPC issue
- Add license files
- Use AmazonLinux instead of Ubuntu
- Pin the pip packages
> Pull requests: [#3374](https://github.com/kubeflow/pipelines/pull/3374), [#3397](https://github.com/kubeflow/pipelines/pull/3397)
No change log is available for older images.
Please check the git log.
| 8,369 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/Dockerfile | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM amazonlinux:2
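# /app is put on PYTHONPATH so the component entry point scripts can import the
# shared `common` package that is copied into /app/common below.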
ENV PYTHONPATH /app
RUN yum update -y \
&& yum install -y -q \
ca-certificates \
python3 \
wget \
unzip
RUN pip3 install \
boto3==1.12.33 \
sagemaker==1.54.0 \
pathlib2==2.3.5 \
pyyaml==3.12
COPY LICENSE.txt .
COPY NOTICE.txt .
COPY THIRD-PARTY-LICENSES.txt .
COPY hyperparameter_tuning/src/hyperparameter_tuning.py .
COPY train/src/train.py .
COPY deploy/src/deploy.py .
COPY model/src/create_model.py .
COPY batch_transform/src/batch_transform.py .
COPY workteam/src/workteam.py .
COPY ground_truth/src/ground_truth.py .
COPY common /app/common/
ENTRYPOINT [ "bash" ]
| 8,370 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/NOTICE.txt | SageMaker Components for Kubeflow Pipelines
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. | 8,371 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/batch_transform/README.md | # SageMaker Batch Transform Kubeflow Pipeline component
## Summary
Component to get inferences for an entire dataset in SageMaker from a Kubeflow Pipelines workflow.
## Details
With [batch transform](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html), you create a batch transform job using a trained model and the dataset, which must be stored in Amazon S3. Use batch transform when you:
* Want to get inferences for an entire dataset and index them to serve inferences in real time
* Don't need a persistent endpoint that applications (for example, web or mobile apps) can call to get inferences
* Don't need the subsecond latency that Amazon SageMaker hosted endpoints provide
## Intended Use
Create a transform job in AWS SageMaker.
## Runtime Arguments
Argument | Description | Optional (in pipeline definition) | Optional (in UI) | Data type | Accepted values | Default |
:--- | :---------- | :---------- | :---------- | :----------| :---------- | :----------|
region | The region where the batch transform job is created | No | No | String | | |
endpoint_url | The endpoint URL for the private link VPC endpoint | Yes | Yes | String | | |
job_name | The name of the transform job. The name must be unique within an AWS Region in an AWS account | Yes | Yes | String | | A generated name (a combination of model_name and the 'BatchTransform' string)|
model_name | The name of the model that you want to use for the transform job. Model name must be the name of an existing Amazon SageMaker model within an AWS Region in an AWS account | No | No | String | | |
max_concurrent | The maximum number of parallel requests that can be sent to each instance in a transform job | Yes | Yes | Integer | | 0 |
max_payload | The maximum allowed size of the payload, in MB | Yes | Yes | Integer | The value in max_payload must be greater than, or equal to, the size of a single record | 6 |
batch_strategy | The number of records to include in a mini-batch for an HTTP inference request | Yes | Yes | String | `MultiRecord`, `SingleRecord` | |
environment | The environment variables to set in the Docker container | Yes | Yes | Dict | Maximum length of 1024. Key Pattern: `[a-zA-Z_][a-zA-Z0-9_]*`. Value Pattern: `[\S\s]*`. Up to 16 key-value entries in the map | |
The following parameters construct [`TransformInput`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformInput.html) object of the CreateTransformJob API. These describe the input source and the way the transform job consumes it.
Argument | Description | Optional (in pipeline definition) | Optional (in UI) | Data type | Accepted values | Default |
:--- | :---------- | :---------- | :---------- | :----------| :---------- | :----------|
input_location | The S3 location of the data source that is associated with a channel. [Read more on S3Uri](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformS3DataSource.html) | No | No | String | | |
data_type | Used by SageMaker to identify the objects from the S3 bucket to be used for batch transform. [Read more on S3DataType](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformS3DataSource.html) | Yes | Yes | String | `ManifestFile`, `S3Prefix`, `AugmentedManifestFile`| `S3Prefix` |
content_type | The multipurpose internet mail extension (MIME) type of the data. Amazon SageMaker uses the MIME type with each HTTP call to transfer data to the transform job | Yes | Yes | String | | |
split_type | The method to use to split the transform job data files into smaller batches | Yes | Yes | String | `Line`, `RecordIO`, `TFRecord`, `None` | `None` |
compression_type | If the transform data is compressed, specify the compression type | Yes | Yes | String | `Gzip`, `None` | `None` |
* `input_location` and `data_type` parameters above are used to construct [`S3DataSource`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformS3DataSource.html) object which is part of [`TransformDataSource`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformDataSource.html) object in [`TransformInput`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformInput.html) part of the CreateTransformJob API.
```
TransformInput={
'DataSource': {
'S3DataSource': {
'S3DataType': 'ManifestFile'|'S3Prefix'|'AugmentedManifestFile',
'S3Uri': 'string'
}
},
... other input parameters ...
}
```
[Ref](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_transform_job)
The following parameters are used to construct [`TransformOutput`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformOutput.html) object of the CreateTransformJob API. These describe the results of a transform job.
Argument | Description | Optional (in pipeline definition) | Optional (in UI) | Data type | Accepted values | Default |
:--- | :---------- | :---------- | :---------- | :----------| :---------- | :----------|
output_location | The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job | No | No | String | | |
accept | The MIME type used to specify the output data. Amazon SageMaker uses the MIME type with each HTTP call to transfer data from the transform job | Yes | Yes | String | | |
assemble_with | Defines how to assemble the results of the transform job as a single S3 object. To concatenate the results in binary format, specify None. To add a newline character at the end of every transformed record, specify Line | Yes | Yes | String | `Line`, `None` | `None`|
output_encryption_key | The AWS Key Management Service key to encrypt the model artifacts at rest using Amazon S3 server-side encryption | Yes | Yes | String | [KmsKeyId formats](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformOutput.html) | |
The following parameters are used to construct [`TransformResources`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformResources.html) object of the CreateTransformJob API. These describe the resources, including ML instance types and ML instance count, to use for the transform job.
Argument | Description | Optional (in pipeline definition) | Optional (in UI) | Data type | Accepted values | Default |
:--- | :---------- | :---------- | :---------- | :----------| :---------- | :----------|
instance_type | The ML compute instance type for the transform job | Yes | Yes | String | ml.m4.xlarge, ml.m4.2xlarge, ml.m4.4xlarge, ml.m4.10xlarge, ml.m4.16xlarge, ml.m5.large, ml.m5.xlarge, ml.m5.2xlarge, ml.m5.4xlarge, ml.m5.12xlarge, ml.m5.24xlarge, ml.c4.xlarge, ml.c4.2xlarge, ml.c4.4xlarge, ml.c4.8xlarge, ml.p2.xlarge, ml.p2.8xlarge, ml.p2.16xlarge, ml.p3.2xlarge, ml.p3.8xlarge, ml.p3.16xlarge, ml.c5.xlarge, ml.c5.2xlarge, ml.c5.4xlarge, ml.c5.9xlarge, ml.c5.18xlarge | ml.m4.xlarge |
instance_count | The number of ML compute instances to use in the transform job | Yes | Yes | Integer | | 1 |
resource_encryption_key | The AWS Key Management Service (AWS KMS) key used to encrypt model data on the storage volume attached to the ML compute instance(s) that run the batch transform job. | Yes | Yes | String | [VolumeKmsKeyId formats](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_TransformResources.html) | |
The following parameters are used to construct [`DataProcessing`](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DataProcessing.html) object of the CreateTransformJob API. The data structure used to specify the data to be used for inference in a batch transform job and to associate the data that is relevant to the prediction results in the output.
Argument | Description | Optional (in pipeline definition) | Optional (in UI) | Data type | Accepted values | Default |
:--- | :---------- | :---------- | :---------- | :----------| :---------- | :----------|
input_filter | A JSONPath expression used to select a portion of the input data to pass to the algorithm. [Read more on InputFilter](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DataProcessing.html) | Yes | Yes | String | | |
output_filter | A JSONPath expression used to select a portion of the joined dataset to save in the output file for a batch transform job. [Read more on OutputFilter](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DataProcessing.html) | Yes | Yes | String | | |
join_source | Specifies the source of the data to join with the transformed data. [Read more on JoinSource](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DataProcessing.html) | Yes | Yes | String | `Input`, `None` | None |
Notes:
* Please use the links in the [Resources section](#Resources) for detailed information on each input parameter and the SageMaker APIs used in this component. See the sketch below for how these fields map onto the underlying API request.
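The following is an illustrative sketch only, not code from this component: it shows roughly how the fields described above could be assembled into a direct `boto3` [`create_transform_job`](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_transform_job) call. All job, model, and bucket names are placeholders.
```
import boto3

client = boto3.client('sagemaker', region_name='us-east-1')

# All names and S3 URIs below are placeholders.
client.create_transform_job(
    TransformJobName='example-batch-transform-job',
    ModelName='example-model',          # an existing SageMaker model
    MaxPayloadInMB=6,
    BatchStrategy='MultiRecord',
    TransformInput={
        'DataSource': {
            'S3DataSource': {
                'S3DataType': 'S3Prefix',
                'S3Uri': 's3://example-bucket/batch-input',
            }
        },
        'ContentType': 'text/csv',
        'SplitType': 'Line',
        'CompressionType': 'None',
    },
    TransformOutput={
        'S3OutputPath': 's3://example-bucket/batch-output',
        'AssembleWith': 'Line',
    },
    TransformResources={
        'InstanceType': 'ml.m4.xlarge',
        'InstanceCount': 1,
    },
)
```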
## Outputs
Name | Description
:--- | :----------
output_location | The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job
## Requirements
* [Kubeflow pipelines SDK](https://www.kubeflow.org/docs/pipelines/sdk/install-sdk/)
* [Kubeflow set-up](https://www.kubeflow.org/docs/aws/deploy/install-kubeflow/)
## Samples
### Integrated into a pipeline
MNIST Classification pipeline: [Pipeline](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/aws-samples/mnist-kmeans-sagemaker/mnist-classification-pipeline.py) | [Steps](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/aws-samples/mnist-kmeans-sagemaker/README.md)
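For a smaller, self-contained illustration of the wiring, the sketch below loads the component spec from a local checkout and runs it in a one-step pipeline. The component path, model name, and S3 URIs are placeholders, and the real samples additionally attach AWS credentials to the step (for example with `kfp.aws.use_aws_secret`), which is omitted here.
```
import kfp
from kfp import components, dsl

# Placeholder path to this component spec in a local checkout of the repo.
sagemaker_batch_transform_op = components.load_component_from_file(
    'components/aws/sagemaker/batch_transform/component.yaml')


@dsl.pipeline(name='Batch transform example',
              description='Runs a SageMaker batch transform job')
def batch_transform_pipeline(
    region='us-east-1',
    model_name='example-model',                         # must already exist in SageMaker
    input_location='s3://example-bucket/batch-input',
    output_location='s3://example-bucket/batch-output',
):
    transform = sagemaker_batch_transform_op(
        region=region,
        model_name=model_name,
        input_location=input_location,
        output_location=output_location,
        instance_type='ml.m4.xlarge',
        instance_count=1,
    )
    # transform.outputs['output_location'] holds the S3 URI of the results
    # and can be passed to downstream steps.


if __name__ == '__main__':
    kfp.compiler.Compiler().compile(batch_transform_pipeline, __file__ + '.zip')
```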
## Resources
* [Batch Transform on SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html)
* [Create Transform Job API documentation](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTransformJob.html)
* [Boto3 API reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_transform_job)
| 8,372 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/batch_transform/component.yaml | name: 'SageMaker - Batch Transformation'
description: |
Batch Transformation Jobs in SageMaker
inputs:
- name: region
    description: 'The region where the batch transform job launches.'
type: String
- name: job_name
description: 'The name of the batch transform job.'
default: ''
type: String
- name: model_name
description: 'The name of the model that you want to use for the transform job.'
type: String
- name: max_concurrent
description: 'The maximum number of parallel requests that can be sent to each instance in a transform job.'
default: '0'
type: Integer
- name: max_payload
description: 'The maximum allowed size of the payload, in MB.'
default: '6'
type: Integer
- name: batch_strategy
description: 'The number of records to include in a mini-batch for an HTTP inference request.'
default: ''
type: String
- name: environment
description: 'The environment variables to set in the Docker container. Up to 16 key-value entries in the map.'
default: '{}'
type: JsonObject
- name: input_location
description: 'The S3 location of the data source that is associated with a channel.'
type: String
- name: data_type
description: 'Data type of the input. Can be ManifestFile, S3Prefix, or AugmentedManifestFile.'
default: 'S3Prefix'
type: String
- name: content_type
description: 'The multipurpose internet mail extension (MIME) type of the data.'
default: ''
type: String
- name: split_type
description: 'The method to use to split the transform job data files into smaller batches.'
default: 'None'
type: String
- name: compression_type
description: 'If the transform data is compressed, the specification of the compression type.'
default: 'None'
type: String
- name: output_location
description: 'The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job.'
type: String
- name: accept
description: 'The MIME type used to specify the output data.'
default: ''
type: String
- name: assemble_with
description: 'Defines how to assemble the results of the transform job as a single S3 object. Either None or Line.'
default: ''
type: String
- name: output_encryption_key
description: 'The AWS Key Management Service ID of the key used to encrypt the output data.'
default: ''
type: String
- name: input_filter
description: 'A JSONPath expression used to select a portion of the input data to pass to the algorithm.'
default: ''
type: String
- name: output_filter
description: 'A JSONPath expression used to select a portion of the joined dataset to save in the output file for a batch transform job.'
default: ''
type: String
- name: join_source
description: 'Specifies the source of the data to join with the transformed data.'
default: 'None'
type: String
- name: instance_type
description: 'The ML compute instance type.'
default: 'ml.m4.xlarge'
type: String
- name: instance_count
description: 'The number of ML compute instances to use in each training job.'
default: '1'
type: Integer
- name: resource_encryption_key
description: 'The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).'
default: ''
type: String
- name: endpoint_url
description: 'The endpoint URL for the private link VPC endpoint.'
default: ''
type: String
- name: tags
description: 'Key-value pairs to categorize AWS resources.'
default: '{}'
type: JsonObject
outputs:
- {name: output_location, description: 'S3 URI of the transform job results.'}
implementation:
container:
image: amazon/aws-sagemaker-kfp-components:0.3.1
command: ['python3']
args: [
batch_transform.py,
--region, {inputValue: region},
--endpoint_url, {inputValue: endpoint_url},
--job_name, {inputValue: job_name},
--model_name, {inputValue: model_name},
--max_concurrent, {inputValue: max_concurrent},
--max_payload, {inputValue: max_payload},
--batch_strategy, {inputValue: batch_strategy},
--environment, {inputValue: environment},
--input_location, {inputValue: input_location},
--data_type, {inputValue: data_type},
--content_type, {inputValue: content_type},
--split_type, {inputValue: split_type},
--compression_type, {inputValue: compression_type},
--output_location, {inputValue: output_location},
--accept, {inputValue: accept},
--assemble_with, {inputValue: assemble_with},
--output_encryption_key, {inputValue: output_encryption_key},
--input_filter, {inputValue: input_filter},
--output_filter, {inputValue: output_filter},
--join_source, {inputValue: join_source},
--instance_type, {inputValue: instance_type},
--instance_count, {inputValue: instance_count},
--resource_encryption_key, {inputValue: resource_encryption_key},
--tags, {inputValue: tags},
--output_location_file, {outputPath: output_location}
]
| 8,373 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/batch_transform | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/batch_transform/src/batch_transform.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from pathlib2 import Path
from common import _utils
try:
unicode
except NameError:
unicode = str
def create_parser():
parser = argparse.ArgumentParser(description='SageMaker Batch Transformation Job')
_utils.add_default_client_arguments(parser)
parser.add_argument('--job_name', type=str, required=False, help='The name of the transform job.', default='')
parser.add_argument('--model_name', type=str, required=True, help='The name of the model that you want to use for the transform job.')
parser.add_argument('--max_concurrent', type=int, required=False, help='The maximum number of parallel requests that can be sent to each instance in a transform job.', default='0')
parser.add_argument('--max_payload', type=int, required=False, help='The maximum allowed size of the payload, in MB.', default='6')
parser.add_argument('--batch_strategy', choices=['MultiRecord', 'SingleRecord', ''], type=str, required=False, help='The number of records to include in a mini-batch for an HTTP inference request.', default='')
parser.add_argument('--environment', type=_utils.yaml_or_json_str, required=False, help='The dictionary of the environment variables to set in the Docker container. Up to 16 key-value entries in the map.', default={})
parser.add_argument('--input_location', type=str, required=True, help='The S3 location of the data source that is associated with a channel.')
parser.add_argument('--data_type', choices=['ManifestFile', 'S3Prefix', 'AugmentedManifestFile', ''], type=str, required=False, help='Data type of the input. Can be ManifestFile, S3Prefix, or AugmentedManifestFile.', default='S3Prefix')
parser.add_argument('--content_type', type=str, required=False, help='The multipurpose internet mail extension (MIME) type of the data.', default='')
parser.add_argument('--split_type', choices=['None', 'Line', 'RecordIO', 'TFRecord', ''], type=str, required=False, help='The method to use to split the transform job data files into smaller batches.', default='None')
parser.add_argument('--compression_type', choices=['None', 'Gzip', ''], type=str, required=False, help='If the transform data is compressed, the specification of the compression type.', default='None')
parser.add_argument('--output_location', type=str, required=True, help='The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job.')
parser.add_argument('--accept', type=str, required=False, help='The MIME type used to specify the output data.')
parser.add_argument('--assemble_with', choices=['None', 'Line', ''], type=str, required=False, help='Defines how to assemble the results of the transform job as a single S3 object. Either None or Line.')
  parser.add_argument('--output_encryption_key', type=str, required=False, help='The AWS Key Management Service ID of the key used to encrypt the output data.', default='')
parser.add_argument('--input_filter', type=str, required=False, help='A JSONPath expression used to select a portion of the input data to pass to the algorithm.', default='')
parser.add_argument('--output_filter', type=str, required=False, help='A JSONPath expression used to select a portion of the joined dataset to save in the output file for a batch transform job.', default='')
parser.add_argument('--join_source', choices=['None', 'Input', ''], type=str, required=False, help='Specifies the source of the data to join with the transformed data.', default='None')
parser.add_argument('--instance_type', choices=['ml.m4.xlarge', 'ml.m4.2xlarge', 'ml.m4.4xlarge', 'ml.m4.10xlarge', 'ml.m4.16xlarge', 'ml.m5.large', 'ml.m5.xlarge', 'ml.m5.2xlarge', 'ml.m5.4xlarge',
'ml.m5.12xlarge', 'ml.m5.24xlarge', 'ml.c4.xlarge', 'ml.c4.2xlarge', 'ml.c4.4xlarge', 'ml.c4.8xlarge', 'ml.p2.xlarge', 'ml.p2.8xlarge', 'ml.p2.16xlarge', 'ml.p3.2xlarge', 'ml.p3.8xlarge', 'ml.p3.16xlarge',
'ml.c5.xlarge', 'ml.c5.2xlarge', 'ml.c5.4xlarge', 'ml.c5.9xlarge', 'ml.c5.18xlarge'], type=str, required=True, help='The ML compute instance type for the transform job.', default='ml.m4.xlarge')
  parser.add_argument('--instance_count', type=int, required=False, help='The number of ML compute instances to use in the transform job.', default=1)
parser.add_argument('--resource_encryption_key', type=str, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--tags', type=_utils.yaml_or_json_str, required=False, help='An array of key-value pairs, to categorize AWS resources.', default={})
parser.add_argument('--output_location_file', type=str, required=True, help='File path where the program will write the Amazon S3 URI of the transform job results.')
return parser
def main(argv=None):
parser = create_parser()
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_sagemaker_client(args.region, args.endpoint_url)
logging.info('Submitting Batch Transformation request to SageMaker...')
batch_job_name = _utils.create_transform_job(client, vars(args))
logging.info('Batch Job request submitted. Waiting for completion...')
_utils.wait_for_transform_job(client, batch_job_name)
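  # Persist the S3 output path to the file passed via --output_location_file,
  # which component.yaml maps to the component's output_location outputPath.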
Path(args.output_location_file).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_location_file).write_text(unicode(args.output_location))
logging.info('Batch Transformation creation completed.')
if __name__== "__main__":
main()
| 8,374 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/hyperparameter_tuning/README.md | # SageMaker hyperparameter optimization Kubeflow Pipeline component
## Summary
Component to submit hyperparameter tuning jobs to SageMaker directly from a Kubeflow Pipelines workflow.
# Details
## Intended Use
For hyperparameter tuning jobs using AWS SageMaker.
## Runtime Arguments
Argument | Description | Optional (in pipeline definition) | Optional (in UI) | Data type | Accepted values | Default |
:--- | :---------- | :---------- | :---------- | :----------| :---------- | :----------|
region | The region where the cluster launches | No | No | String | | |
job_name | The name of the tuning job. Must be unique within the same AWS account and AWS region | Yes | Yes | String | | HPOJob-[datetime]-[random id] |
role | The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf | No | No | String | | |
image | The registry path of the Docker image that contains the training algorithm | Yes | Yes | String | | |
algorithm_name | The name of the algorithm resource to use for the hyperparameter tuning job; only specify this parameter if training image is not specified | Yes | Yes | String | | |
training_input_mode | The input mode that the algorithm supports | Yes | No | String | File, Pipe | File |
metric_definitions | The dictionary of name-regex pairs that specify the metrics the algorithm emits | Yes | Yes | Dict | | {} |
strategy | How hyperparameter tuning chooses the combinations of hyperparameter values to use for the training job it launches | Yes | No | String | Bayesian, Random | Bayesian |
metric_name | The name of the metric to use for the objective metric | No | No | String | | |
metric_type | Whether to minimize or maximize the objective metric | No | No | String | Maximize, Minimize | |
early_stopping_type | Whether to use early stopping for training jobs launched by the tuning job | Yes | No | String | Off, Auto | Off |
static_parameters | The values of hyperparameters that do not change for the tuning job | Yes | Yes | Dict | | {} |
integer_parameters | The array of IntegerParameterRange objects that specify ranges of integer hyperparameters that you want to search | Yes | Yes | List of Dicts | | [] |
continuous_parameters | The array of ContinuousParameterRange objects that specify ranges of continuous hyperparameters that you want to search | Yes | Yes | List of Dicts | | [] |
categorical_parameters | The array of CategoricalParameterRange objects that specify ranges of categorical hyperparameters that you want to search | Yes | Yes | List of Dicts | | [] |
channels | A list of dicts specifying the input channels (at least one); refer to [documentation](https://github.com/awsdocs/amazon-sagemaker-developer-guide/blob/master/doc_source/API_Channel.md) for parameters | No | No | List of Dicts | | |
output_location | The Amazon S3 path where you want Amazon SageMaker to store the model artifacts from the best training job | No | No | String | | |
output_encryption_key | The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts | Yes | Yes | String | | |
instance_type | The ML compute instance type | Yes | No | String | ml.m4.xlarge, ml.m4.2xlarge, ml.m4.4xlarge, ml.m4.10xlarge, ml.m4.16xlarge, ml.m5.large, ml.m5.xlarge, ml.m5.2xlarge, ml.m5.4xlarge, ml.m5.12xlarge, ml.m5.24xlarge, ml.c4.xlarge, ml.c4.2xlarge, ml.c4.4xlarge, ml.c4.8xlarge, ml.p2.xlarge, ml.p2.8xlarge, ml.p2.16xlarge, ml.p3.2xlarge, ml.p3.8xlarge, ml.p3.16xlarge, ml.c5.xlarge, ml.c5.2xlarge, ml.c5.4xlarge, ml.c5.9xlarge, ml.c5.18xlarge | ml.m4.xlarge |
instance_count | The number of ML compute instances to use in each training job | Yes | Yes | Int | ≥ 1 | 1 |
volume_size | The size of the ML storage volume that you want to provision in GB | Yes | Yes | Int | ≥ 1 | 30 |
max_num_jobs | The maximum number of training jobs that a hyperparameter tuning job can launch | No | No | Int | [1, 500] | |
max_parallel_jobs | The maximum number of concurrent training jobs that a hyperparameter tuning job can launch | No | No | Int | [1, 10] | |
max_run_time | The maximum run time in seconds per training job | Yes | Yes | Int | ≤ 432000 (5 days) | 86400 (1 day) |
resource_encryption_key | The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) | Yes | Yes | String | | |
vpc_security_group_ids | A comma-delimited list of security group IDs, in the form sg-xxxxxxxx | Yes | Yes | String | | |
vpc_subnets | A comma-delimited list of subnet IDs in the VPC to which you want to connect your hpo job | Yes | Yes | String | | |
network_isolation | Isolates the training container if true | Yes | No | Boolean | False, True | True |
traffic_encryption | Encrypts all communications between ML compute instances in distributed training if true | Yes | No | Boolean | False, True | False |
spot_instance | Use managed spot training if true | Yes | No | Boolean | False, True | False |
max_wait_time | The maximum time in seconds you are willing to wait for a managed spot training job to complete | Yes | Yes | Int | ≤ 432000 (5 days) | 86400 (1 day) |
checkpoint_config | Dictionary of information about the output location for managed spot training checkpoint data | Yes | Yes | Dict | | {} |
warm_start_type | Specifies the type of warm start used | Yes | No | String | IdenticalDataAndAlgorithm, TransferLearning | |
parent_hpo_jobs | List of previously completed or stopped hyperparameter tuning jobs to be used as a starting point | Yes | Yes | String | | |
endpoint_url | The endpoint URL for the private link VPC endpoint. | Yes | Yes | String | | |
tags | Key-value pairs to categorize AWS resources | Yes | Yes | Dict | | {} |
Notes:
* Specify training image OR algorithm name. Use the image parameter for Bring Your Own Container (BYOC) algorithms, and algorithm name for Amazon built-in algorithms, custom algorithm resources in SageMaker, and algorithms subscribed to from the AWS Marketplace.
* Specify VPC security group IDs AND VPC subnets to specify the VPC that you want the training jobs to connect to.
* Specify warm start type AND 1 to 5 parent HPO jobs to launch the hyperparameter tuning job with previous jobs as a starting point.
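Below is a minimal sketch of wiring these arguments into a Kubeflow Pipelines definition. It assumes the component file is available at the path passed to `load_component_from_file`, and every ARN, image URI, S3 path, hyperparameter name, and metric name is a placeholder you must replace; JSON-typed inputs (channels, parameter ranges, static parameters) are passed as serialized strings.

```python
import json
import kfp
from kfp import components, dsl
from kfp.aws import use_aws_secret

# Load the component definition from this directory (path is an assumption).
sagemaker_hpo_op = components.load_component_from_file(
    'components/aws/sagemaker/hyperparameter_tuning/component.yaml')

@dsl.pipeline(name='sagemaker-hpo-example', description='Minimal HPO example')
def hpo_pipeline(
    region='us-east-1',
    role_arn='arn:aws:iam::123456789012:role/<sagemaker-execution-role>',  # placeholder
):
    hpo = sagemaker_hpo_op(
        region=region,
        role=role_arn,
        image='<training-image-uri>',          # placeholder training image
        metric_name='validation:accuracy',     # example objective metric emitted by the image
        metric_type='Maximize',
        static_parameters=json.dumps({'epochs': '10'}),
        integer_parameters=json.dumps([
            {'Name': 'mini_batch_size', 'MinValue': '32', 'MaxValue': '256'}
        ]),
        channels=json.dumps([{
            'ChannelName': 'train',
            'DataSource': {'S3DataSource': {
                'S3Uri': 's3://<bucket>/train', 'S3DataType': 'S3Prefix',
                'S3DataDistributionType': 'FullyReplicated'}},
            'ContentType': '', 'CompressionType': 'None',
            'RecordWrapperType': 'None', 'InputMode': 'File'
        }]),
        output_location='s3://<bucket>/hpo-output',
        max_num_jobs=4,
        max_parallel_jobs=2,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))
    # Downstream steps can consume e.g. hpo.outputs['best_hyperparameters'].

if __name__ == '__main__':
    kfp.compiler.Compiler().compile(hpo_pipeline, 'hpo-pipeline.tar.gz')
```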
## Outputs
Name | Description
:--- | :----------
hpo_job_name | The name of the hyper parameter tuning job
model_artifact_url | URL where model artifacts were stored
best_job_name | Best hyperparameter tuning training job name
best_hyperparameters | Tuned hyperparameters
training_image | The registry path of the Docker image that contains the training algorithm
# Requirements
* [Kubeflow pipelines SDK](https://www.kubeflow.org/docs/pipelines/sdk/install-sdk/)
* [Kubeflow set-up](https://www.kubeflow.org/docs/aws/deploy/install-kubeflow/)
# Samples
## On its own
K-Means algorithm tuning on MNIST dataset: [pipeline](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/aws-samples/mnist-kmeans-sagemaker/kmeans-hpo-pipeline.py)
Follow the steps in the [README](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/aws-samples/mnist-kmeans-sagemaker/README.md) with some modifications:
1. Get and store data in S3 buckets
2. Prepare an IAM role with permissions to run SageMaker jobs
3. Add 'aws-secret' to your kubeflow namespace
4. Compile the pipeline:
```bash
dsl-compile --py kmeans-hpo-pipeline.py --output kmeans-hpo-pipeline.tar.gz
```
5. In the Kubeflow UI, upload the compiled pipeline specification (the .tar.gz file) and create a new run. Update the role_arn and the data paths, and optionally any other run parameters.
6. Once the pipeline completes, you can see the outputs under 'Output parameters' in the HPO component's Input/Output section.
## Integrated into a pipeline
MNIST Classification using K-Means pipeline: [Pipeline](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/aws-samples/mnist-kmeans-sagemaker/mnist-classification-pipeline.py) | [Steps](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/aws-samples/mnist-kmeans-sagemaker/README.md)
# Resources
* [Using Amazon built-in algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html)
* [More information on request parameters](https://github.com/awsdocs/amazon-sagemaker-developer-guide/blob/master/doc_source/API_CreateHyperParameterTuningJob.md#request-parameters)
| 8,375 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/hyperparameter_tuning/component.yaml | name: 'SageMaker - Hyperparameter Tuning'
description: |
Hyperparameter Tuning Jobs in SageMaker
inputs:
- name: region
description: 'The region where the cluster launches.'
- name: job_name
description: 'The name of the tuning job. Must be unique within the same AWS account and AWS region.'
default: ''
type: String
- name: role
description: 'The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf.'
type: String
- name: image
description: 'The registry path of the Docker image that contains the training algorithm.'
default: ''
type: String
- name: algorithm_name
description: 'The name of the algorithm resource to use for the hyperparameter tuning job. Do not specify a value for this if using training image.'
default: ''
type: String
- name: training_input_mode
description: 'The input mode that the algorithm supports. File or Pipe.'
default: 'File'
type: String
- name: metric_definitions
    description: 'The dictionary of name-regex pairs that specify the metrics the algorithm emits.'
default: '{}'
type: JsonObject
- name: strategy
description: 'How hyperparameter tuning chooses the combinations of hyperparameter values to use for the training job it launches.'
default: 'Bayesian'
type: String
- name: metric_name
description: 'The name of the metric to use for the objective metric.'
type: String
- name: metric_type
description: 'Whether to minimize or maximize the objective metric.'
type: String
- name: early_stopping_type
description: 'Whether to use early stopping for training jobs launched by the tuning job.'
default: 'Off'
type: String
- name: static_parameters
description: 'The values of hyperparameters that do not change for the tuning job.'
default: '{}'
type: JsonObject
- name: integer_parameters
description: 'The array of IntegerParameterRange objects that specify ranges of integer hyperparameters that you want to search.'
default: '[]'
type: JsonArray
- name: continuous_parameters
description: 'The array of ContinuousParameterRange objects that specify ranges of continuous hyperparameters that you want to search.'
default: '[]'
    type: JsonArray
- name: categorical_parameters
description: 'The array of CategoricalParameterRange objects that specify ranges of categorical hyperparameters that you want to search.'
default: '[]'
type: JsonArray
- name: channels
description: 'A list of dicts specifying the input channels. Must have at least one.'
type: JsonArray
- name: output_location
    description: 'The Amazon S3 path where you want Amazon SageMaker to store the model artifacts from the best training job.'
type: String
- name: output_encryption_key
description: 'The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.'
default: ''
type: String
- name: instance_type
description: 'The ML compute instance type.'
default: 'ml.m4.xlarge'
type: String
- name: instance_count
description: 'The number of ML compute instances to use in each training job.'
default: '1'
type: Integer
- name: volume_size
description: 'The size of the ML storage volume that you want to provision.'
default: '30'
type: Integer
- name: max_num_jobs
description: 'The maximum number of training jobs that a hyperparameter tuning job can launch.'
type: Integer
- name: max_parallel_jobs
description: 'The maximum number of concurrent training jobs that a hyperparameter tuning job can launch.'
type: Integer
- name: max_run_time
description: 'The maximum run time in seconds per training job.'
default: '86400'
type: Integer
- name: resource_encryption_key
description: 'The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).'
default: ''
type: String
- name: vpc_security_group_ids
description: 'The VPC security group IDs, in the form sg-xxxxxxxx.'
default: ''
type: String
- name: vpc_subnets
description: 'The ID of the subnets in the VPC to which you want to connect your hpo job.'
default: ''
type: String
- name: network_isolation
description: 'Isolates the training container.'
default: 'True'
type: Bool
- name: traffic_encryption
description: 'Encrypts all communications between ML compute instances in distributed training.'
default: 'False'
type: Bool
- name: spot_instance
description: 'Use managed spot training.'
default: 'False'
type: Bool
- name: max_wait_time
description: 'The maximum time in seconds you are willing to wait for a managed spot training job to complete.'
default: '86400'
type: Integer
- name: checkpoint_config
description: 'Dictionary of information about the output location for managed spot training checkpoint data.'
default: '{}'
type: JsonObject
- name: warm_start_type
description: 'Specifies either "IdenticalDataAndAlgorithm" or "TransferLearning"'
default: ''
type: String
- name: parent_hpo_jobs
description: 'List of previously completed or stopped hyperparameter tuning jobs to be used as a starting point.'
default: ''
type: String
- name: endpoint_url
description: 'The endpoint URL for the private link VPC endpoint.'
default: ''
type: String
- name: tags
description: 'Key-value pairs, to categorize AWS resources.'
default: '{}'
type: JsonObject
outputs:
- name: hpo_job_name
description: 'The name of the hyper parameter tuning job'
- name: model_artifact_url
description: 'Model artifacts url'
- name: best_job_name
description: 'Best training job in the hyper parameter tuning job'
- name: best_hyperparameters
description: 'Tuned hyperparameters'
- name: training_image
description: 'The registry path of the Docker image that contains the training algorithm'
implementation:
container:
image: amazon/aws-sagemaker-kfp-components:0.3.1
command: ['python3']
args: [
hyperparameter_tuning.py,
--region, {inputValue: region},
--endpoint_url, {inputValue: endpoint_url},
--job_name, {inputValue: job_name},
--role, {inputValue: role},
--image, {inputValue: image},
--algorithm_name, {inputValue: algorithm_name},
--training_input_mode, {inputValue: training_input_mode},
--metric_definitions, {inputValue: metric_definitions},
--strategy, {inputValue: strategy},
--metric_name, {inputValue: metric_name},
--metric_type, {inputValue: metric_type},
--early_stopping_type, {inputValue: early_stopping_type},
--static_parameters, {inputValue: static_parameters},
--integer_parameters, {inputValue: integer_parameters},
--continuous_parameters, {inputValue: continuous_parameters},
--categorical_parameters, {inputValue: categorical_parameters},
--channels, {inputValue: channels},
--output_location, {inputValue: output_location},
--output_encryption_key, {inputValue: output_encryption_key},
--instance_type, {inputValue: instance_type},
--instance_count, {inputValue: instance_count},
--volume_size, {inputValue: volume_size},
--max_num_jobs, {inputValue: max_num_jobs},
--max_parallel_jobs, {inputValue: max_parallel_jobs},
--resource_encryption_key, {inputValue: resource_encryption_key},
--max_run_time, {inputValue: max_run_time},
--vpc_security_group_ids, {inputValue: vpc_security_group_ids},
--vpc_subnets, {inputValue: vpc_subnets},
--network_isolation, {inputValue: network_isolation},
--traffic_encryption, {inputValue: traffic_encryption},
--spot_instance, {inputValue: spot_instance},
--max_wait_time, {inputValue: max_wait_time},
--checkpoint_config, {inputValue: checkpoint_config},
--warm_start_type, {inputValue: warm_start_type},
--parent_hpo_jobs, {inputValue: parent_hpo_jobs},
--tags, {inputValue: tags}
]
fileOutputs:
hpo_job_name: /tmp/hpo_job_name.txt
model_artifact_url: /tmp/model_artifact_url.txt
best_job_name: /tmp/best_job_name.txt
best_hyperparameters: /tmp/best_hyperparameters.txt
training_image: /tmp/training_image.txt
| 8,376 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/hyperparameter_tuning | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/hyperparameter_tuning/src/hyperparameter_tuning.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import json
from common import _utils
def create_parser():
parser = argparse.ArgumentParser(description='SageMaker Hyperparameter Tuning Job')
_utils.add_default_client_arguments(parser)
parser.add_argument('--job_name', type=str, required=False, help='The name of the tuning job. Must be unique within the same AWS account and AWS region.')
parser.add_argument('--role', type=str, required=True, help='The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf.')
parser.add_argument('--image', type=str, required=True, help='The registry path of the Docker image that contains the training algorithm.', default='')
parser.add_argument('--algorithm_name', type=str, required=False, help='The name of the resource algorithm to use for the hyperparameter tuning job.', default='')
parser.add_argument('--training_input_mode', choices=['File', 'Pipe'], type=str, required=False, help='The input mode that the algorithm supports. File or Pipe.', default='File')
  parser.add_argument('--metric_definitions', type=_utils.yaml_or_json_str, required=False, help='The dictionary of name-regex pairs that specify the metrics the algorithm emits.', default={})
parser.add_argument('--strategy', choices=['Bayesian', 'Random'], type=str, required=False, help='How hyperparameter tuning chooses the combinations of hyperparameter values to use for the training job it launches.', default='Bayesian')
parser.add_argument('--metric_name', type=str, required=True, help='The name of the metric to use for the objective metric.')
parser.add_argument('--metric_type', choices=['Maximize', 'Minimize'], type=str, required=True, help='Whether to minimize or maximize the objective metric.')
  parser.add_argument('--early_stopping_type', choices=['Off', 'Auto'], type=str, required=False, help='Whether to use early stopping for training jobs launched by the tuning job.', default='Off')
parser.add_argument('--static_parameters', type=_utils.yaml_or_json_str, required=False, help='The values of hyperparameters that do not change for the tuning job.', default={})
parser.add_argument('--integer_parameters', type=_utils.yaml_or_json_str, required=False, help='The array of IntegerParameterRange objects that specify ranges of integer hyperparameters that you want to search.', default=[])
parser.add_argument('--continuous_parameters', type=_utils.yaml_or_json_str, required=False, help='The array of ContinuousParameterRange objects that specify ranges of continuous hyperparameters that you want to search.', default=[])
parser.add_argument('--categorical_parameters', type=_utils.yaml_or_json_str, required=False, help='The array of CategoricalParameterRange objects that specify ranges of categorical hyperparameters that you want to search.', default=[])
parser.add_argument('--channels', type=_utils.yaml_or_json_str, required=True, help='A list of dicts specifying the input channels. Must have at least one.')
  parser.add_argument('--output_location', type=str, required=True, help='The Amazon S3 path where you want Amazon SageMaker to store the model artifacts from the best training job.')
parser.add_argument('--output_encryption_key', type=str, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.', default='')
parser.add_argument('--instance_type', choices=['ml.m4.xlarge', 'ml.m4.2xlarge', 'ml.m4.4xlarge', 'ml.m4.10xlarge', 'ml.m4.16xlarge', 'ml.m5.large', 'ml.m5.xlarge', 'ml.m5.2xlarge', 'ml.m5.4xlarge',
'ml.m5.12xlarge', 'ml.m5.24xlarge', 'ml.c4.xlarge', 'ml.c4.2xlarge', 'ml.c4.4xlarge', 'ml.c4.8xlarge', 'ml.p2.xlarge', 'ml.p2.8xlarge', 'ml.p2.16xlarge', 'ml.p3.2xlarge', 'ml.p3.8xlarge', 'ml.p3.16xlarge',
'ml.c5.xlarge', 'ml.c5.2xlarge', 'ml.c5.4xlarge', 'ml.c5.9xlarge', 'ml.c5.18xlarge'], type=str, required=False, help='The ML compute instance type.', default='ml.m4.xlarge')
parser.add_argument('--instance_count', type=int, required=False, help='The number of ML compute instances to use in each training job.', default=1)
parser.add_argument('--volume_size', type=int, required=False, help='The size of the ML storage volume that you want to provision.', default=1)
parser.add_argument('--max_num_jobs', type=int, required=True, help='The maximum number of training jobs that a hyperparameter tuning job can launch.')
parser.add_argument('--max_parallel_jobs', type=int, required=True, help='The maximum number of concurrent training jobs that a hyperparameter tuning job can launch.')
parser.add_argument('--max_run_time', type=int, required=False, help='The maximum run time in seconds per training job.', default=86400)
parser.add_argument('--resource_encryption_key', type=str, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--vpc_security_group_ids', type=str, required=False, help='The VPC security group IDs, in the form sg-xxxxxxxx.')
parser.add_argument('--vpc_subnets', type=str, required=False, help='The ID of the subnets in the VPC to which you want to connect your hpo job.')
parser.add_argument('--network_isolation', type=_utils.str_to_bool, required=False, help='Isolates the training container.', default=True)
parser.add_argument('--traffic_encryption', type=_utils.str_to_bool, required=False, help='Encrypts all communications between ML compute instances in distributed training.', default=False)
parser.add_argument('--warm_start_type', choices=['IdenticalDataAndAlgorithm', 'TransferLearning', ''], type=str, required=False, help='Specifies either "IdenticalDataAndAlgorithm" or "TransferLearning"')
parser.add_argument('--parent_hpo_jobs', type=str, required=False, help='List of previously completed or stopped hyperparameter tuning jobs to be used as a starting point.', default='')
### Start spot instance support
parser.add_argument('--spot_instance', type=_utils.str_to_bool, required=False, help='Use managed spot training.', default=False)
parser.add_argument('--max_wait_time', type=int, required=False, help='The maximum time in seconds you are willing to wait for a managed spot training job to complete.', default=86400)
parser.add_argument('--checkpoint_config', type=_utils.yaml_or_json_str, required=False, help='Dictionary of information about the output location for managed spot training checkpoint data.', default={})
### End spot instance support
parser.add_argument('--tags', type=_utils.yaml_or_json_str, required=False, help='An array of key-value pairs, to categorize AWS resources.', default={})
return parser
def main(argv=None):
parser = create_parser()
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
  client = _utils.get_sagemaker_client(args.region, args.endpoint_url)
logging.info('Submitting HyperParameter Tuning Job request to SageMaker...')
hpo_job_name = _utils.create_hyperparameter_tuning_job(client, vars(args))
logging.info('HyperParameter Tuning Job request submitted. Waiting for completion...')
_utils.wait_for_hyperparameter_training_job(client, hpo_job_name)
best_job, best_hyperparameters = _utils.get_best_training_job_and_hyperparameters(client, hpo_job_name)
model_artifact_url = _utils.get_model_artifacts_from_job(client, best_job)
image = _utils.get_image_from_job(client, best_job)
logging.info('HyperParameter Tuning Job completed.')
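  # Write each output to the fixed /tmp path that component.yaml lists under
  # fileOutputs; Kubeflow Pipelines reads these files to populate the step outputs.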
with open('/tmp/hpo_job_name.txt', 'w') as f:
f.write(hpo_job_name)
with open('/tmp/best_job_name.txt', 'w') as f:
f.write(best_job)
with open('/tmp/best_hyperparameters.txt', 'w') as f:
f.write(json.dumps(best_hyperparameters))
with open('/tmp/model_artifact_url.txt', 'w') as f:
f.write(model_artifact_url)
with open('/tmp/training_image.txt', 'w') as f:
f.write(image)
if __name__== "__main__":
main()
| 8,377 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/codebuild/deploy.buildspec.yml | version: 0.2
phases:
pre_build:
commands:
# Log in to Dockerhub
- docker login -u $DOCKER_CONFIG_USERNAME -p $DOCKER_CONFIG_PASSWORD
build:
commands:
- cd components/aws/sagemaker
- ./codebuild/scripts/deploy.sh -d "${DRY_RUN}" | 8,378 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/codebuild/integration-test.buildspec.yml | version: 0.2
phases:
build:
commands:
- cd components/aws
- docker build . -f ./sagemaker/tests/integration_tests/Dockerfile -t amazon/integration-test-image --quiet
# Run the container and copy the results to /tmp
# Passes all host environment variables through to the container
- docker run --name integration-test-container $(env | cut -f1 -d= | sed 's/^/-e /') amazon/integration-test-image
- docker cp integration-test-container:/app/tests/integration_tests/integration_tests.log /tmp/results.xml
- docker rm -f integration-test-container
reports:
IntegrationTestReport:
files:
- "results.xml"
base-directory: "/tmp" | 8,379 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/codebuild/unit-test.buildspec.yml | version: 0.2
phases:
build:
commands:
- cd components/aws
- docker build . -f ./sagemaker/tests/unit_tests/Dockerfile -t amazon/unit-test-image --quiet
# Run the container and copy the results to /tmp
# Passes all host environment variables through to the container
- docker run --name unit-test-container $(env | cut -f1 -d= | sed 's/^/-e /') amazon/unit-test-image
- docker cp unit-test-container:/app/tests/unit_tests/unit_tests.log /tmp/results.xml
- docker rm -f unit-test-container
reports:
UnitTestReport:
files:
- "results.xml"
base-directory: "/tmp" | 8,380 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/codebuild | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/codebuild/scripts/deploy.sh | #!/usr/bin/env bash
set -e
REMOTE_REPOSITORY="amazon/aws-sagemaker-kfp-components"
DRYRUN="true"
FULL_VERSION_TAG=""
DOCKER_CONFIG_PATH=${DOCKER_CONFIG_PATH:-"/root/.docker"}
while getopts ":d:v:" opt; do
case ${opt} in
d)
if [[ "${OPTARG}" = "false" ]]; then
DRYRUN="false"
else
DRYRUN="true"
fi
;;
v)
FULL_VERSION_TAG="${OPTARG}"
;;
esac
done
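# Return success (0) if the given repository:tag already exists on Docker Hub.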
function docker_tag_exists() {
curl --silent -f -lSL https://index.docker.io/v1/repositories/$1/tags/$2 > /dev/null 2> /dev/null
}
if [[ ! -z "${FULL_VERSION_TAG}" && ! "${FULL_VERSION_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
>&2 echo "Version tag does not match SEMVER style (X.Y.Z)"
exit 1
fi
# Check version does not already exist
VERSION_LICENSE_FILE="THIRD-PARTY-LICENSES.txt"
if [[ -z "${FULL_VERSION_TAG}" ]]; then
  FULL_VERSION_TAG="$(cat ${VERSION_LICENSE_FILE} | head -n1 | grep -Po '(?<=version )\d+\.\d+\.\d+')"
fi
if [ -z "$FULL_VERSION_TAG" ]; then
>&2 echo "Could not find version inside ${VERSION_LICENSE_FILE} file."
exit 1
fi
echo "Deploying version ${FULL_VERSION_TAG}"
if docker_tag_exists "$REMOTE_REPOSITORY" "$FULL_VERSION_TAG"; then
>&2 echo "Tag ${REMOTE_REPOSITORY}:${FULL_VERSION_TAG} already exists. Cannot overwrite an existing image."
exit 1
fi
# Build the image
FULL_VERSION_IMAGE="${REMOTE_REPOSITORY}:${FULL_VERSION_TAG}"
docker build . -f Dockerfile -t "${FULL_VERSION_IMAGE}"
# Get the minor and major versions
[[ $FULL_VERSION_TAG =~ ^[0-9]+\.[0-9]+ ]] && MINOR_VERSION_IMAGE="${REMOTE_REPOSITORY}:${BASH_REMATCH[0]}"
[[ $FULL_VERSION_TAG =~ ^[0-9]+ ]] && MAJOR_VERSION_IMAGE="${REMOTE_REPOSITORY}:${BASH_REMATCH[0]}"
# Re-tag the image with major and minor versions
docker tag "${FULL_VERSION_IMAGE}" "${MINOR_VERSION_IMAGE}"
echo "Tagged image with ${MINOR_VERSION_IMAGE}"
docker tag "${FULL_VERSION_IMAGE}" "${MAJOR_VERSION_IMAGE}"
echo "Tagged image with ${MAJOR_VERSION_IMAGE}"
# Push to the remote repository
if [ "${DRYRUN}" == "false" ]; then
docker --config "$DOCKER_CONFIG_PATH" push "${FULL_VERSION_IMAGE}"
echo "Successfully pushed tag ${FULL_VERSION_IMAGE} to Docker Hub"
docker --config "$DOCKER_CONFIG_PATH" push "${MINOR_VERSION_IMAGE}"
echo "Successfully pushed tag ${MINOR_VERSION_IMAGE} to Docker Hub"
docker --config "$DOCKER_CONFIG_PATH" push "${MAJOR_VERSION_IMAGE}"
echo "Successfully pushed tag ${MAJOR_VERSION_IMAGE} to Docker Hub"
else
echo "Dry run detected. Not pushing images."
fi | 8,381 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/ground_truth/README.md | # SageMaker Ground Truth Kubeflow Pipelines component
## Summary
Component to submit SageMaker Ground Truth labeling jobs directly from a Kubeflow Pipelines workflow.
# Details
## Intended Use
For Ground Truth jobs using AWS SageMaker.
## Runtime Arguments
Argument | Description | Optional | Data type | Accepted values | Default |
:--- | :---------- | :----------| :----------| :---------- | :----------|
region | The region where the cluster launches | No | String | | |
role | The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf | No | String | | |
job_name | The name of the Ground Truth job. Must be unique within the same AWS account and AWS region | Yes | String | | LabelingJob-[datetime]-[random id]|
label_attribute_name | The attribute name to use for the label in the output manifest file | Yes | String | | job_name |
manifest_location | The Amazon S3 location of the manifest file that describes the input data objects | No | String | | |
output_location | The Amazon S3 location where you want Amazon SageMaker to write the output data of the labeling job | No | String | | |
output_encryption_key | The AWS KMS key that Amazon SageMaker uses to encrypt the output data | Yes | String | | |
task_type | Built in image classification, bounding box, text classification, or semantic segmentation, or custom; If custom, please provide pre- and post-labeling task lambda functions | No | String | Image Classification, Bounding Box, Text Classification, Semantic Segmentation, Custom | |
worker_type | The workteam for data labeling | No | String | Public, Private, Vendor | |
workteam_arn | The ARN of the work team assigned to complete the tasks; specify if worker type is private or vendor | Yes | String | | |
no_adult_content | If data is free of adult content; specify if worker type is public | Yes | Boolean | False, True | False |
no_ppi | If data is free of personally identifiable information; specify if worker type is public | Yes | Boolean | False, True | False |
label_category_config | The S3 URL of the JSON structured file that defines the categories used to label the data objects | Yes | String | | |
max_human_labeled_objects | The maximum number of objects that can be labeled by human workers | Yes | Int | ≥ 1 | all objects |
max_percent_objects | The maximum percentage of input data objects that should be labeled | Yes | Int | [1, 100] | 100 |
enable_auto_labeling | Enables auto-labeling; only for bounding box, text classification, and image classification | Yes | Boolean | False, True | False |
initial_model_arn | The ARN of the final model used for a previous auto-labeling job | Yes | String | | |
resource_encryption_key | The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) | Yes | String | | |
ui_template | The Amazon S3 bucket location of the UI template | No | String | | |
pre_human_task_function | The ARN of a Lambda function that is run before a data object is sent to a human worker | Yes | String | | |
post_human_task_function | The ARN of a Lambda function implements the logic for annotation consolidation | Yes | String | | |
task_keywords | Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task | Yes | String | | |
title | A title for the task for your human workers | No | String | | |
description | A description of the task for your human workers | No | String | | |
num_workers_per_object | The number of human workers that will label an object | No | Int | [1, 9] | |
time_limit | The amount of time that a worker has to complete a task, in seconds | No | Int | [30, 28800] | |
task_availibility | The length of time that a task remains available for labeling by human workers | Yes | Int | Public workforce: [1, 43200], other: [1, 864000] | |
max_concurrent_tasks | The maximum number of data objects that can be labeled by human workers at the same time | Yes | Int | [1, 1000] | |
workforce_task_price | The price that you pay for each task performed by a public worker in USD; Specify to the tenth fractions of a cent; Format as "0.000" | Yes | Float | | 0.000 |
endpoint_url | The endpoint URL for the private link VPC endpoint. | Yes | String | | |
tags | Key-value pairs to categorize AWS resources | Yes | Dict | | {} |
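As an illustration of how these arguments map onto a pipeline step, here is a hedged sketch that loads the component and exposes its labeled-manifest output to later steps; the component path, ARNs, S3 URIs, and task settings are all placeholders to replace with your own values.

```python
import kfp
from kfp import components, dsl
from kfp.aws import use_aws_secret

# Path to the component definition is an assumption; adjust to your checkout.
sagemaker_gt_op = components.load_component_from_file(
    'components/aws/sagemaker/ground_truth/component.yaml')

@dsl.pipeline(name='ground-truth-example', description='Minimal Ground Truth example')
def gt_pipeline(region='us-east-1',
                role_arn='arn:aws:iam::123456789012:role/<sagemaker-execution-role>',
                workteam_arn='<private-workteam-arn>'):
    gt = sagemaker_gt_op(
        region=region,
        role=role_arn,
        job_name='example-labeling-job',
        manifest_location='s3://<bucket>/input.manifest',
        output_location='s3://<bucket>/gt-output',
        task_type='image classification',
        worker_type='private',
        workteam_arn=workteam_arn,
        ui_template='s3://<bucket>/template.liquid',
        title='Classify the image',
        description='Pick the category that best matches the image',
        num_workers_per_object=1,
        time_limit=300,
    ).apply(use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))
    # gt.outputs['output_manifest_location'] can feed a training component's input channel.

if __name__ == '__main__':
    kfp.compiler.Compiler().compile(gt_pipeline, 'gt-pipeline.tar.gz')
```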
## Outputs
Name | Description
:--- | :----------
output_manifest_location | URL where labeling results were stored
active_learning_model_arn | ARN of the resulting active learning model
# Requirements
* [Kubeflow pipelines SDK](https://www.kubeflow.org/docs/pipelines/sdk/install-sdk/)
* [Kubeflow set-up on AWS](https://www.kubeflow.org/docs/aws/deploy/install-kubeflow/)
# Samples
## Used in a pipeline with workteam creation and training
Mini image classification demo: [Demo](https://github.com/kubeflow/pipelines/blob/master/samples/contrib/aws-samples/ground_truth_pipeline_demo/)
# References
* [Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms.html)
* [Building a custom data labeling workflow](https://aws.amazon.com/blogs/machine-learning/build-a-custom-data-labeling-workflow-with-amazon-sagemaker-ground-truth/)
* [Sample UI template for Bounding Box](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/ground_truth_object_detection_tutorial/object_detection_tutorial.ipynb)
* [Sample UI template for Image Classification](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/ground_truth_labeling_jobs/from_unlabeled_data_to_deployed_machine_learning_model_ground_truth_demo_image_classification)
* [Using Ground Truth results in training jobs](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/object_detection_augmented_manifest_training/object_detection_augmented_manifest_training.ipynb)
| 8,382 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/ground_truth/component.yaml | name: 'SageMaker - Ground Truth'
description: |
Ground Truth Jobs in SageMaker
inputs:
- name: region
description: 'The region where the cluster launches.'
type: String
- name: role
description: 'The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf.'
type: String
- name: job_name
description: 'The name of the labeling job.'
type: String
- name: label_attribute_name
description: 'The attribute name to use for the label in the output manifest file. Default is the job name.'
default: ''
type: String
- name: manifest_location
description: 'The Amazon S3 location of the manifest file that describes the input data objects.'
type: String
- name: output_location
description: 'The Amazon S3 location to write output data.'
type: String
- name: output_encryption_key
description: 'The AWS Key Management Service ID of the key used to encrypt the output data.'
default: ''
type: String
- name: task_type
description: 'Built in image classification, bounding box, text classification, or semantic segmentation, or custom. If custom, please provide pre- and post-labeling task lambda functions.'
type: String
- name: worker_type
description: 'The workteam for data labeling, either public, private, or vendor.'
type: String
- name: workteam_arn
description: 'The ARN of the work team assigned to complete the tasks.'
default: ''
type: String
- name: no_adult_content
description: 'If true, your data is free of adult content.'
default: 'False'
type: Bool
- name: no_ppi
description: 'If true, your data is free of personally identifiable information.'
default: 'False'
type: Bool
- name: label_category_config
description: 'The S3 URL of the JSON structured file that defines the categories used to label the data objects.'
default: ''
type: String
- name: max_human_labeled_objects
description: 'The maximum number of objects that can be labeled by human workers.'
default: ''
type: Integer
- name: max_percent_objects
    description: 'The maximum percentage of input data objects that should be labeled.'
default: ''
type: Integer
- name: enable_auto_labeling
description: 'Enables auto-labeling, only for bounding box, text classification, and image classification.'
default: 'False'
type: Bool
- name: initial_model_arn
description: 'The ARN of the final model used for a previous auto-labeling job.'
default: ''
type: String
- name: resource_encryption_key
description: 'The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).'
default: ''
type: String
- name: ui_template
description: 'The Amazon S3 bucket location of the UI template.'
type: String
- name: pre_human_task_function
description: 'The ARN of a Lambda function that is run before a data object is sent to a human worker.'
default: ''
type: String
- name: post_human_task_function
description: 'The ARN of a Lambda function implements the logic for annotation consolidation.'
default: ''
type: String
- name: task_keywords
description: 'Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task.'
default: ''
type: String
- name: title
description: 'A title for the task for your human workers.'
type: String
- name: description
description: 'A description of the task for your human workers.'
type: String
- name: num_workers_per_object
description: 'The number of human workers that will label an object.'
type: Integer
- name: time_limit
description: 'The amount of time that a worker has to complete a task in seconds'
type: Integer
- name: task_availibility
description: 'The length of time that a task remains available for labeling by human workers.'
default: ''
type: Integer
- name: max_concurrent_tasks
description: 'The maximum number of data objects that can be labeled by human workers at the same time.'
default: ''
type: Integer
- name: workforce_task_price
description: 'The price that you pay for each task performed by a public worker in USD. Specify to the tenth fractions of a cent. Format as "0.000".'
default: '0.000'
type: Float
- name: endpoint_url
description: 'The endpoint URL for the private link VPC endpoint.'
default: ''
type: String
- name: tags
description: 'Key-value pairs to categorize AWS resources.'
default: '{}'
type: JsonObject
outputs:
- {name: output_manifest_location, description: 'The Amazon S3 bucket location of the manifest file for labeled data.'}
- {name: active_learning_model_arn, description: 'The ARN for the most recent Amazon SageMaker model trained as part of automated data labeling.'}
implementation:
container:
image: amazon/aws-sagemaker-kfp-components:0.3.1
command: ['python3']
args: [
ground_truth.py,
--region, {inputValue: region},
--endpoint_url, {inputValue: endpoint_url},
--role, {inputValue: role},
--job_name, {inputValue: job_name},
--label_attribute_name, {inputValue: label_attribute_name},
--manifest_location, {inputValue: manifest_location},
--output_location, {inputValue: output_location},
--output_encryption_key, {inputValue: output_encryption_key},
--task_type, {inputValue: task_type},
--worker_type, {inputValue: worker_type},
--workteam_arn, {inputValue: workteam_arn},
--no_adult_content, {inputValue: no_adult_content},
--no_ppi, {inputValue: no_ppi},
--label_category_config, {inputValue: label_category_config},
--max_human_labeled_objects, {inputValue: max_human_labeled_objects},
--max_percent_objects, {inputValue: max_percent_objects},
--enable_auto_labeling, {inputValue: enable_auto_labeling},
--initial_model_arn, {inputValue: initial_model_arn},
--resource_encryption_key, {inputValue: resource_encryption_key},
--ui_template, {inputValue: ui_template},
--pre_human_task_function, {inputValue: pre_human_task_function},
--post_human_task_function, {inputValue: post_human_task_function},
--task_keywords, {inputValue: task_keywords},
--title, {inputValue: title},
--description, {inputValue: description},
--num_workers_per_object, {inputValue: num_workers_per_object},
--time_limit, {inputValue: time_limit},
--task_availibility, {inputValue: task_availibility},
--max_concurrent_tasks, {inputValue: max_concurrent_tasks},
--workforce_task_price, {inputValue: workforce_task_price},
--tags, {inputValue: tags}
]
fileOutputs:
output_manifest_location: /tmp/output_manifest_location.txt
active_learning_model_arn: /tmp/active_learning_model_arn.txt
| 8,383 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/ground_truth | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/ground_truth/src/ground_truth.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from common import _utils
def create_parser():
parser = argparse.ArgumentParser(description='SageMaker Ground Truth Job')
_utils.add_default_client_arguments(parser)
parser.add_argument('--role', type=str, required=True, help='The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf.')
parser.add_argument('--job_name', type=str, required=True, help='The name of the labeling job.')
parser.add_argument('--label_attribute_name', type=str, required=False, help='The attribute name to use for the label in the output manifest file. Default is the job name.', default='')
parser.add_argument('--manifest_location', type=str, required=True, help='The Amazon S3 location of the manifest file that describes the input data objects.')
parser.add_argument('--output_location', type=str, required=True, help='The Amazon S3 location to write output data.')
  parser.add_argument('--output_encryption_key', type=str, required=False, help='The AWS Key Management Service ID of the key used to encrypt the output data.', default='')
parser.add_argument('--task_type', type=str, required=True, help='Built in image classification, bounding box, text classification, or semantic segmentation, or custom. If custom, please provide pre- and post-labeling task lambda functions.')
parser.add_argument('--worker_type', type=str, required=True, help='The workteam for data labeling, either public, private, or vendor.')
parser.add_argument('--workteam_arn', type=str, required=False, help='The ARN of the work team assigned to complete the tasks.')
parser.add_argument('--no_adult_content', type=_utils.str_to_bool, required=False, help='If true, your data is free of adult content.', default='False')
parser.add_argument('--no_ppi', type=_utils.str_to_bool, required=False, help='If true, your data is free of personally identifiable information.', default='False')
parser.add_argument('--label_category_config', type=str, required=False, help='The S3 URL of the JSON structured file that defines the categories used to label the data objects.', default='')
parser.add_argument('--max_human_labeled_objects', type=int, required=False, help='The maximum number of objects that can be labeled by human workers.', default=0)
  parser.add_argument('--max_percent_objects', type=int, required=False, help='The maximum percentage of input data objects that should be labeled.', default=0)
parser.add_argument('--enable_auto_labeling', type=_utils.str_to_bool, required=False, help='Enables auto-labeling, only for bounding box, text classification, and image classification.', default=False)
parser.add_argument('--initial_model_arn', type=str, required=False, help='The ARN of the final model used for a previous auto-labeling job.', default='')
parser.add_argument('--resource_encryption_key', type=str, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--ui_template', type=str, required=True, help='The Amazon S3 bucket location of the UI template.')
parser.add_argument('--pre_human_task_function', type=str, required=False, help='The ARN of a Lambda function that is run before a data object is sent to a human worker.', default='')
parser.add_argument('--post_human_task_function', type=str, required=False, help='The ARN of a Lambda function implements the logic for annotation consolidation.', default='')
parser.add_argument('--task_keywords', type=str, required=False, help='Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task.', default='')
parser.add_argument('--title', type=str, required=True, help='A title for the task for your human workers.')
parser.add_argument('--description', type=str, required=True, help='A description of the task for your human workers.')
parser.add_argument('--num_workers_per_object', type=int, required=True, help='The number of human workers that will label an object.')
parser.add_argument('--time_limit', type=int, required=True, help='The amount of time that a worker has to complete a task in seconds')
  parser.add_argument('--task_availibility', type=int, required=False, help='The length of time that a task remains available for labeling by human workers.', default=0)
parser.add_argument('--max_concurrent_tasks', type=int, required=False, help='The maximum number of data objects that can be labeled by human workers at the same time.', default=0)
parser.add_argument('--workforce_task_price', type=float, required=False, help='The price that you pay for each task performed by a public worker in USD. Specify to the tenth fractions of a cent. Format as "0.000".', default=0.000)
parser.add_argument('--tags', type=_utils.yaml_or_json_str, required=False, help='An array of key-value pairs, to categorize AWS resources.', default={})
return parser
def main(argv=None):
parser = create_parser()
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_sagemaker_client(args.region, args.endpoint_url)
logging.info('Submitting Ground Truth Job request to SageMaker...')
_utils.create_labeling_job(client, vars(args))
logging.info('Ground Truth labeling job request submitted. Waiting for completion...')
_utils.wait_for_labeling_job(client, args.job_name)
output_manifest, active_learning_model_arn = _utils.get_labeling_job_outputs(client, args.job_name, args.enable_auto_labeling)
logging.info('Ground Truth Labeling Job completed.')
with open('/tmp/output_manifest_location.txt', 'w') as f:
f.write(output_manifest)
with open('/tmp/active_learning_model_arn.txt', 'w') as f:
f.write(active_learning_model_arn)
if __name__== "__main__":
main()
| 8,384 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/common/gt.template.yaml | LabelingJobName: ''
LabelAttributeName: ''
InputConfig:
DataSource:
S3DataSource:
ManifestS3Uri: ''
DataAttributes:
ContentClassifiers: []
OutputConfig:
S3OutputPath: ''
KmsKeyId: ''
RoleArn: ''
LabelCategoryConfigS3Uri: ''
StoppingConditions:
MaxHumanLabeledObjectCount: 1
MaxPercentageOfInputDatasetLabeled: 100
LabelingJobAlgorithmsConfig:
LabelingJobAlgorithmSpecificationArn: ''
InitialActiveLearningModelArn: ''
LabelingJobResourceConfig:
VolumeKmsKeyId: ''
HumanTaskConfig:
WorkteamArn: ''
UiConfig:
UiTemplateS3Uri: ''
PreHumanTaskLambdaArn: ''
TaskKeywords: []
TaskTitle: ''
TaskDescription: ''
NumberOfHumanWorkersPerDataObject: 1
TaskTimeLimitInSeconds: 30
TaskAvailabilityLifetimeInSeconds: 1
MaxConcurrentTaskCount: 1
AnnotationConsolidationConfig:
AnnotationConsolidationLambdaArn: ''
PublicWorkforceTaskPrice:
AmountInUsd:
Dollars: 0
Cents: 0
TenthFractionsOfACent: 0
Tags: []
| 8,385 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/common/train.template.yaml | TrainingJobName: ''
HyperParameters: {}
AlgorithmSpecification:
TrainingImage: ''
AlgorithmName: ''
TrainingInputMode: ''
MetricDefinitions: []
RoleArn: ''
InputDataConfig: []
OutputDataConfig:
KmsKeyId: ''
S3OutputPath: ''
ResourceConfig:
InstanceType: ''
InstanceCount: 1
VolumeSizeInGB: 1
VolumeKmsKeyId: ''
VpcConfig:
SecurityGroupIds: []
Subnets: []
StoppingCondition:
MaxRuntimeInSeconds: 86400
MaxWaitTimeInSeconds: 86400
CheckpointConfig:
S3Uri: ''
LocalPath: ''
Tags: []
EnableNetworkIsolation: True
EnableInterContainerTrafficEncryption: False
EnableManagedSpotTraining: False | 8,386 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/common/transform.template.yaml | TransformJobName: ''
ModelName: ''
MaxConcurrentTransforms: 0
MaxPayloadInMB: 6
BatchStrategy: ''
Environment: {}
TransformInput:
DataSource:
S3DataSource:
S3DataType: 'S3Prefix'
S3Uri: ''
ContentType: ''
CompressionType: 'None'
SplitType: 'None'
TransformOutput:
S3OutputPath: ''
Accept: ''
AssembleWith: 'None'
KmsKeyId: ''
TransformResources:
InstanceType: ''
InstanceCount: 1
VolumeKmsKeyId: ''
DataProcessing:
InputFilter: ''
OutputFilter: ''
JoinSource: 'None'
Tags: []
| 8,387 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/common/model.template.yaml | ModelName: ''
PrimaryContainer:
ContainerHostname: ''
Image: ''
ModelDataUrl: ''
Environment: {}
ModelPackageName: ''
Containers: []
ExecutionRoleArn: ''
Tags: []
VpcConfig:
SecurityGroupIds: []
Subnets: []
EnableNetworkIsolation: True
| 8,388 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/common/workteam.template.yaml | WorkteamName: ''
MemberDefinitions: []
Description: ''
NotificationConfiguration:
NotificationTopicArn: ''
Tags: []
| 8,389 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/common/endpoint_config.template.yaml | EndpointConfigName: ''
ProductionVariants:
- VariantName: 'variant-name-1'
ModelName: ''
InitialInstanceCount: 1
InstanceType: 'ml.m4.xlarge'
InitialVariantWeight: 1.0
AcceleratorType: ''
- VariantName: 'variant-name-2'
ModelName: ''
InitialInstanceCount: 1
InstanceType: 'ml.m4.xlarge'
InitialVariantWeight: 1.0
AcceleratorType: ''
- VariantName: 'variant-name-3'
ModelName: ''
InitialInstanceCount: 1
InstanceType: 'ml.m4.xlarge'
InitialVariantWeight: 1.0
AcceleratorType: ''
Tags: []
KmsKeyId: ''
| 8,390 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/common/_utils.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
from time import gmtime, strftime
from distutils.util import strtobool
import time
import string
import random
import json
import yaml
import re
import boto3
import botocore
from botocore.exceptions import ClientError
from sagemaker.amazon.amazon_estimator import get_image_uri
import logging
logging.getLogger().setLevel(logging.INFO)
# Mappings are extracted from the first table in https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html
built_in_algos = {
'blazingtext': 'blazingtext',
'deepar forecasting': 'forecasting-deepar',
'factorization machines': 'factorization-machines',
'image classification': 'image-classification',
'ip insights': 'ipinsights',
'k-means': 'kmeans',
'k-nearest neighbors': 'knn',
'k-nn': 'knn',
'lda': 'lda',
'linear learner': 'linear-learner',
'neural topic model': 'ntm',
'object2vec': 'object2vec',
'object detection': 'object-detection',
'pca': 'pca',
'random cut forest': 'randomcutforest',
'semantic segmentation': 'semantic-segmentation',
'sequence to sequence': 'seq2seq',
'seq2seq modeling': 'seq2seq',
'xgboost': 'xgboost'
}
# Get current directory to open templates
__cwd__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def nullable_string_argument(value):
value = value.strip()
if not value:
return None
return value
def add_default_client_arguments(parser):
parser.add_argument('--region', type=str, required=True, help='The region where the training job launches.')
parser.add_argument('--endpoint_url', type=nullable_string_argument, required=False, help='The URL to use when communicating with the Sagemaker service.')
def get_component_version():
"""Get component version from the first line of License file"""
component_version = 'NULL'
# Get license file using known common directory
license_file_path = os.path.abspath(os.path.join(__cwd__, '../THIRD-PARTY-LICENSES.txt'))
with open(license_file_path, 'r') as license_file:
version_match = re.search('Amazon SageMaker Components for Kubeflow Pipelines; version (([0-9]+[.])+[0-9]+)',
license_file.readline())
if version_match is not None:
component_version = version_match.group(1)
return component_version
def get_sagemaker_client(region, endpoint_url=None):
"""Builds a client to the AWS SageMaker API."""
session_config = botocore.config.Config(
user_agent='sagemaker-on-kubeflow-pipelines-v{}'.format(get_component_version())
)
client = boto3.client('sagemaker', region_name=region, endpoint_url=endpoint_url, config=session_config)
return client
def create_training_job_request(args):
### Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_training_job
with open(os.path.join(__cwd__, 'train.template.yaml'), 'r') as f:
request = yaml.safe_load(f)
job_name = args['job_name'] if args['job_name'] else 'TrainingJob-' + strftime("%Y%m%d%H%M%S", gmtime()) + '-' + id_generator()
request['TrainingJobName'] = job_name
request['RoleArn'] = args['role']
request['HyperParameters'] = create_hyperparameters(args['hyperparameters'])
request['AlgorithmSpecification']['TrainingInputMode'] = args['training_input_mode']
### Update training image (for BYOC and built-in algorithms) or algorithm resource name
if not args['image'] and not args['algorithm_name']:
logging.error('Please specify training image or algorithm name.')
raise Exception('Could not create job request')
if args['image'] and args['algorithm_name']:
    logging.error('Both image and algorithm name were specified; only one should be provided. Proceeding with image.')
if args['image']:
request['AlgorithmSpecification']['TrainingImage'] = args['image']
request['AlgorithmSpecification'].pop('AlgorithmName')
else:
# TODO: Adjust this implementation to account for custom algorithm resources names that are the same as built-in algorithm names
algo_name = args['algorithm_name'].lower().strip()
if algo_name in built_in_algos.keys():
request['AlgorithmSpecification']['TrainingImage'] = get_image_uri(args['region'], built_in_algos[algo_name])
request['AlgorithmSpecification'].pop('AlgorithmName')
logging.warning('Algorithm name is found as an Amazon built-in algorithm. Using built-in algorithm.')
# Just to give the user more leeway for built-in algorithm name inputs
elif algo_name in built_in_algos.values():
request['AlgorithmSpecification']['TrainingImage'] = get_image_uri(args['region'], algo_name)
request['AlgorithmSpecification'].pop('AlgorithmName')
logging.warning('Algorithm name is found as an Amazon built-in algorithm. Using built-in algorithm.')
else:
request['AlgorithmSpecification']['AlgorithmName'] = args['algorithm_name']
request['AlgorithmSpecification'].pop('TrainingImage')
### Update metric definitions
if args['metric_definitions']:
for key, val in args['metric_definitions'].items():
request['AlgorithmSpecification']['MetricDefinitions'].append({'Name': key, 'Regex': val})
else:
request['AlgorithmSpecification'].pop('MetricDefinitions')
### Update or pop VPC configs
if args['vpc_security_group_ids'] and args['vpc_subnets']:
request['VpcConfig']['SecurityGroupIds'] = args['vpc_security_group_ids'].split(',')
request['VpcConfig']['Subnets'] = args['vpc_subnets'].split(',')
else:
request.pop('VpcConfig')
### Update input channels, must have at least one specified
if len(args['channels']) > 0:
request['InputDataConfig'] = args['channels']
else:
logging.error("Must specify at least one input channel.")
raise Exception('Could not create job request')
request['OutputDataConfig']['S3OutputPath'] = args['model_artifact_path']
request['OutputDataConfig']['KmsKeyId'] = args['output_encryption_key']
request['ResourceConfig']['InstanceType'] = args['instance_type']
request['ResourceConfig']['VolumeKmsKeyId'] = args['resource_encryption_key']
request['EnableNetworkIsolation'] = args['network_isolation']
request['EnableInterContainerTrafficEncryption'] = args['traffic_encryption']
### Update InstanceCount, VolumeSizeInGB, and MaxRuntimeInSeconds if input is non-empty and > 0, otherwise use default values
if args['instance_count']:
request['ResourceConfig']['InstanceCount'] = args['instance_count']
if args['volume_size']:
request['ResourceConfig']['VolumeSizeInGB'] = args['volume_size']
if args['max_run_time']:
request['StoppingCondition']['MaxRuntimeInSeconds'] = args['max_run_time']
enable_spot_instance_support(request, args)
### Update tags
for key, val in args['tags'].items():
request['Tags'].append({'Key': key, 'Value': val})
return request
def create_training_job(client, args):
"""Create a Sagemaker training job."""
request = create_training_job_request(args)
try:
client.create_training_job(**request)
training_job_name = request['TrainingJobName']
logging.info("Created Training Job with name: " + training_job_name)
logging.info("Training job in SageMaker: https://{}.console.aws.amazon.com/sagemaker/home?region={}#/jobs/{}"
.format(args['region'], args['region'], training_job_name))
logging.info("CloudWatch logs: https://{}.console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/TrainingJobs;prefix={};streamFilter=typeLogStreamPrefix"
.format(args['region'], args['region'], training_job_name))
return training_job_name
except ClientError as e:
raise Exception(e.response['Error']['Message'])
def wait_for_training_job(client, training_job_name, poll_interval=30):
while(True):
response = client.describe_training_job(TrainingJobName=training_job_name)
status = response['TrainingJobStatus']
if status == 'Completed':
logging.info("Training job ended with status: " + status)
break
if status == 'Failed':
message = response['FailureReason']
logging.info('Training failed with the following error: {}'.format(message))
raise Exception('Training job failed')
logging.info("Training job is still in status: " + status)
time.sleep(poll_interval)
def get_model_artifacts_from_job(client, job_name):
info = client.describe_training_job(TrainingJobName=job_name)
model_artifact_url = info['ModelArtifacts']['S3ModelArtifacts']
return model_artifact_url
def get_image_from_job(client, job_name):
info = client.describe_training_job(TrainingJobName=job_name)
if 'TrainingImage' in info['AlgorithmSpecification']:
image = info['AlgorithmSpecification']['TrainingImage']
else:
algorithm_name = info['AlgorithmSpecification']['AlgorithmName']
image = client.describe_algorithm(AlgorithmName=algorithm_name)['TrainingSpecification']['TrainingImage']
return image
def create_model(client, args):
request = create_model_request(args)
try:
create_model_response = client.create_model(**request)
logging.info("Model Config Arn: " + create_model_response['ModelArn'])
return create_model_response['ModelArn']
except ClientError as e:
raise Exception(e.response['Error']['Message'])
def create_model_request(args):
### Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_model
with open(os.path.join(__cwd__, 'model.template.yaml'), 'r') as f:
request = yaml.safe_load(f)
request['ModelName'] = args['model_name']
request['PrimaryContainer']['Environment'] = args['environment']
if args['secondary_containers']:
request['Containers'] = args['secondary_containers']
request.pop('PrimaryContainer')
else:
request.pop('Containers')
### Update primary container and handle input errors
if args['container_host_name']:
request['PrimaryContainer']['ContainerHostname'] = args['container_host_name']
else:
request['PrimaryContainer'].pop('ContainerHostname')
if (args['image'] or args['model_artifact_url']) and args['model_package']:
logging.error("Please specify an image AND model artifact url, OR a model package name.")
raise Exception("Could not make create model request.")
elif args['model_package']:
request['PrimaryContainer']['ModelPackageName'] = args['model_package']
request['PrimaryContainer'].pop('Image')
request['PrimaryContainer'].pop('ModelDataUrl')
else:
if args['image'] and args['model_artifact_url']:
request['PrimaryContainer']['Image'] = args['image']
request['PrimaryContainer']['ModelDataUrl'] = args['model_artifact_url']
request['PrimaryContainer'].pop('ModelPackageName')
else:
logging.error("Please specify an image AND model artifact url.")
raise Exception("Could not make create model request.")
request['ExecutionRoleArn'] = args['role']
request['EnableNetworkIsolation'] = args['network_isolation']
### Update or pop VPC configs
if args['vpc_security_group_ids'] and args['vpc_subnets']:
request['VpcConfig']['SecurityGroupIds'] = args['vpc_security_group_ids'].split(',')
request['VpcConfig']['Subnets'] = args['vpc_subnets'].split(',')
else:
request.pop('VpcConfig')
### Update tags
for key, val in args['tags'].items():
request['Tags'].append({'Key': key, 'Value': val})
return request
def deploy_model(client, args):
endpoint_config_name = create_endpoint_config(client, args)
endpoint_name = create_endpoint(client, args['region'], args['endpoint_name'], endpoint_config_name, args['endpoint_tags'])
return endpoint_name
def create_endpoint_config_request(args):
### Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_endpoint_config
with open(os.path.join(__cwd__, 'endpoint_config.template.yaml'), 'r') as f:
request = yaml.safe_load(f)
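  # When no endpoint config name is given, reuse the timestamp/random suffix of model_name_1 (everything after its first '-').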
endpoint_config_name = args['endpoint_config_name'] if args['endpoint_config_name'] else 'EndpointConfig' + args['model_name_1'][args['model_name_1'].index('-'):]
request['EndpointConfigName'] = endpoint_config_name
if args['resource_encryption_key']:
request['KmsKeyId'] = args['resource_encryption_key']
else:
request.pop('KmsKeyId')
if not args['model_name_1']:
logging.error("Must specify at least one model (model name) to host.")
raise Exception("Could not create endpoint config.")
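  # Iterate over the templated variants in reverse so unused trailing variants can be popped by index without shifting the remaining entries.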
for i in range(len(request['ProductionVariants']), 0, -1):
if args['model_name_' + str(i)]:
request['ProductionVariants'][i-1]['ModelName'] = args['model_name_' + str(i)]
if args['variant_name_' + str(i)]:
request['ProductionVariants'][i-1]['VariantName'] = args['variant_name_' + str(i)]
if args['initial_instance_count_' + str(i)]:
request['ProductionVariants'][i-1]['InitialInstanceCount'] = args['initial_instance_count_' + str(i)]
if args['instance_type_' + str(i)]:
request['ProductionVariants'][i-1]['InstanceType'] = args['instance_type_' + str(i)]
if args['initial_variant_weight_' + str(i)]:
request['ProductionVariants'][i-1]['InitialVariantWeight'] = args['initial_variant_weight_' + str(i)]
if args['accelerator_type_' + str(i)]:
request['ProductionVariants'][i-1]['AcceleratorType'] = args['accelerator_type_' + str(i)]
else:
request['ProductionVariants'][i-1].pop('AcceleratorType')
else:
request['ProductionVariants'].pop(i-1)
### Update tags
for key, val in args['endpoint_config_tags'].items():
request['Tags'].append({'Key': key, 'Value': val})
return request
def create_endpoint_config(client, args):
request = create_endpoint_config_request(args)
try:
create_endpoint_config_response = client.create_endpoint_config(**request)
logging.info("Endpoint configuration in SageMaker: https://{}.console.aws.amazon.com/sagemaker/home?region={}#/endpointConfig/{}"
.format(args['region'], args['region'], request['EndpointConfigName']))
logging.info("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
return request['EndpointConfigName']
except ClientError as e:
raise Exception(e.response['Error']['Message'])
def create_endpoint(client, region, endpoint_name, endpoint_config_name, endpoint_tags):
### Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_endpoint
endpoint_name = endpoint_name if endpoint_name else 'Endpoint' + endpoint_config_name[endpoint_config_name.index('-'):]
### Update tags
tags=[]
for key, val in endpoint_tags.items():
tags.append({'Key': key, 'Value': val})
try:
create_endpoint_response = client.create_endpoint(
EndpointName=endpoint_name,
EndpointConfigName=endpoint_config_name,
Tags=tags)
logging.info("Created endpoint with name: " + endpoint_name)
logging.info("Endpoint in SageMaker: https://{}.console.aws.amazon.com/sagemaker/home?region={}#/endpoints/{}"
.format(region, region, endpoint_name))
logging.info("CloudWatch logs: https://{}.console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/Endpoints/{};streamFilter=typeLogStreamPrefix"
.format(region, region, endpoint_name))
return endpoint_name
except ClientError as e:
raise Exception(e.response['Error']['Message'])
def wait_for_endpoint_creation(client, endpoint_name):
status = client.describe_endpoint(EndpointName=endpoint_name)['EndpointStatus']
logging.info("Status: " + status)
try:
client.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
finally:
resp = client.describe_endpoint(EndpointName=endpoint_name)
status = resp['EndpointStatus']
logging.info("Endpoint Arn: " + resp['EndpointArn'])
logging.info("Create endpoint ended with status: " + status)
if status != 'InService':
message = client.describe_endpoint(EndpointName=endpoint_name)['FailureReason']
logging.info('Create endpoint failed with the following error: {}'.format(message))
raise Exception('Endpoint creation did not succeed')
def create_transform_job_request(args):
### Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_transform_job
with open(os.path.join(__cwd__, 'transform.template.yaml'), 'r') as f:
request = yaml.safe_load(f)
job_name = args['job_name'] if args['job_name'] else 'BatchTransform' + args['model_name'][args['model_name'].index('-'):]
request['TransformJobName'] = job_name
request['ModelName'] = args['model_name']
if args['max_concurrent']:
request['MaxConcurrentTransforms'] = args['max_concurrent']
if args['max_payload'] or args['max_payload'] == 0:
request['MaxPayloadInMB'] = args['max_payload']
if args['batch_strategy']:
request['BatchStrategy'] = args['batch_strategy']
else:
request.pop('BatchStrategy')
request['Environment'] = args['environment']
if args['data_type']:
request['TransformInput']['DataSource']['S3DataSource']['S3DataType'] = args['data_type']
request['TransformInput']['DataSource']['S3DataSource']['S3Uri'] = args['input_location']
request['TransformInput']['ContentType'] = args['content_type']
if args['compression_type']:
request['TransformInput']['CompressionType'] = args['compression_type']
if args['split_type']:
request['TransformInput']['SplitType'] = args['split_type']
request['TransformOutput']['S3OutputPath'] = args['output_location']
request['TransformOutput']['Accept'] = args['accept']
request['TransformOutput']['KmsKeyId'] = args['output_encryption_key']
if args['assemble_with']:
request['TransformOutput']['AssembleWith'] = args['assemble_with']
else:
request['TransformOutput'].pop('AssembleWith')
request['TransformResources']['InstanceType'] = args['instance_type']
request['TransformResources']['InstanceCount'] = args['instance_count']
request['TransformResources']['VolumeKmsKeyId'] = args['resource_encryption_key']
request['DataProcessing']['InputFilter'] = args['input_filter']
request['DataProcessing']['OutputFilter'] = args['output_filter']
if args['join_source']:
request['DataProcessing']['JoinSource'] = args['join_source']
### Update tags
if not args['tags'] is None:
for key, val in args['tags'].items():
request['Tags'].append({'Key': key, 'Value': val})
return request
def create_transform_job(client, args):
request = create_transform_job_request(args)
try:
client.create_transform_job(**request)
batch_job_name = request['TransformJobName']
logging.info("Created Transform Job with name: " + batch_job_name)
logging.info("Transform job in SageMaker: https://{}.console.aws.amazon.com/sagemaker/home?region={}#/transform-jobs/{}"
.format(args['region'], args['region'], batch_job_name))
logging.info("CloudWatch logs: https://{}.console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/TransformJobs;prefix={};streamFilter=typeLogStreamPrefix"
.format(args['region'], args['region'], batch_job_name))
return batch_job_name
except ClientError as e:
raise Exception(e.response['Error']['Message'])
def wait_for_transform_job(client, batch_job_name):
### Wait until the job finishes
while(True):
response = client.describe_transform_job(TransformJobName=batch_job_name)
status = response['TransformJobStatus']
if status == 'Completed':
logging.info("Transform job ended with status: " + status)
break
if status == 'Failed':
message = response['FailureReason']
logging.info('Transform failed with the following error: {}'.format(message))
raise Exception('Transform job failed')
logging.info("Transform job is still in status: " + status)
time.sleep(30)
def create_hyperparameter_tuning_job_request(args):
### Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_hyper_parameter_tuning_job
with open(os.path.join(__cwd__, 'hpo.template.yaml'), 'r') as f:
request = yaml.safe_load(f)
### Create a hyperparameter tuning job
request['HyperParameterTuningJobName'] = args['job_name'] if args['job_name'] else "HPOJob-" + strftime("%Y%m%d%H%M%S", gmtime()) + '-' + id_generator()
request['HyperParameterTuningJobConfig']['Strategy'] = args['strategy']
request['HyperParameterTuningJobConfig']['HyperParameterTuningJobObjective']['Type'] = args['metric_type']
request['HyperParameterTuningJobConfig']['HyperParameterTuningJobObjective']['MetricName'] = args['metric_name']
request['HyperParameterTuningJobConfig']['ResourceLimits']['MaxNumberOfTrainingJobs'] = args['max_num_jobs']
request['HyperParameterTuningJobConfig']['ResourceLimits']['MaxParallelTrainingJobs'] = args['max_parallel_jobs']
request['HyperParameterTuningJobConfig']['ParameterRanges']['IntegerParameterRanges'] = args['integer_parameters']
request['HyperParameterTuningJobConfig']['ParameterRanges']['ContinuousParameterRanges'] = args['continuous_parameters']
request['HyperParameterTuningJobConfig']['ParameterRanges']['CategoricalParameterRanges'] = args['categorical_parameters']
request['HyperParameterTuningJobConfig']['TrainingJobEarlyStoppingType'] = args['early_stopping_type']
request['TrainingJobDefinition']['StaticHyperParameters'] = create_hyperparameters(args['static_parameters'])
request['TrainingJobDefinition']['AlgorithmSpecification']['TrainingInputMode'] = args['training_input_mode']
### Update training image (for BYOC) or algorithm resource name
if not args['image'] and not args['algorithm_name']:
logging.error('Please specify training image or algorithm name.')
raise Exception('Could not create job request')
if args['image'] and args['algorithm_name']:
    logging.error('Both image and algorithm name were specified; only one should be provided. Proceeding with image.')
if args['image']:
request['TrainingJobDefinition']['AlgorithmSpecification']['TrainingImage'] = args['image']
request['TrainingJobDefinition']['AlgorithmSpecification'].pop('AlgorithmName')
else:
# TODO: Adjust this implementation to account for custom algorithm resources names that are the same as built-in algorithm names
algo_name = args['algorithm_name'].lower().strip()
if algo_name in built_in_algos.keys():
request['TrainingJobDefinition']['AlgorithmSpecification']['TrainingImage'] = get_image_uri(args['region'], built_in_algos[algo_name])
request['TrainingJobDefinition']['AlgorithmSpecification'].pop('AlgorithmName')
logging.warning('Algorithm name is found as an Amazon built-in algorithm. Using built-in algorithm.')
# To give the user more leeway for built-in algorithm name inputs
elif algo_name in built_in_algos.values():
request['TrainingJobDefinition']['AlgorithmSpecification']['TrainingImage'] = get_image_uri(args['region'], algo_name)
request['TrainingJobDefinition']['AlgorithmSpecification'].pop('AlgorithmName')
logging.warning('Algorithm name is found as an Amazon built-in algorithm. Using built-in algorithm.')
else:
request['TrainingJobDefinition']['AlgorithmSpecification']['AlgorithmName'] = args['algorithm_name']
request['TrainingJobDefinition']['AlgorithmSpecification'].pop('TrainingImage')
### Update metric definitions
if args['metric_definitions']:
for key, val in args['metric_definitions'].items():
request['TrainingJobDefinition']['AlgorithmSpecification']['MetricDefinitions'].append({'Name': key, 'Regex': val})
else:
request['TrainingJobDefinition']['AlgorithmSpecification'].pop('MetricDefinitions')
### Update or pop VPC configs
if args['vpc_security_group_ids'] and args['vpc_subnets']:
request['TrainingJobDefinition']['VpcConfig']['SecurityGroupIds'] = args['vpc_security_group_ids'].split(',')
request['TrainingJobDefinition']['VpcConfig']['Subnets'] = args['vpc_subnets'].split(',')
else:
request['TrainingJobDefinition'].pop('VpcConfig')
### Update input channels, must have at least one specified
if len(args['channels']) > 0:
request['TrainingJobDefinition']['InputDataConfig'] = args['channels']
else:
logging.error("Must specify at least one input channel.")
raise Exception('Could not make job request')
request['TrainingJobDefinition']['OutputDataConfig']['S3OutputPath'] = args['output_location']
request['TrainingJobDefinition']['OutputDataConfig']['KmsKeyId'] = args['output_encryption_key']
request['TrainingJobDefinition']['ResourceConfig']['InstanceType'] = args['instance_type']
request['TrainingJobDefinition']['ResourceConfig']['VolumeKmsKeyId'] = args['resource_encryption_key']
request['TrainingJobDefinition']['EnableNetworkIsolation'] = args['network_isolation']
request['TrainingJobDefinition']['EnableInterContainerTrafficEncryption'] = args['traffic_encryption']
request['TrainingJobDefinition']['RoleArn'] = args['role']
### Update InstanceCount, VolumeSizeInGB, and MaxRuntimeInSeconds if input is non-empty and > 0, otherwise use default values
if args['instance_count']:
request['TrainingJobDefinition']['ResourceConfig']['InstanceCount'] = args['instance_count']
if args['volume_size']:
request['TrainingJobDefinition']['ResourceConfig']['VolumeSizeInGB'] = args['volume_size']
if args['max_run_time']:
request['TrainingJobDefinition']['StoppingCondition']['MaxRuntimeInSeconds'] = args['max_run_time']
### Update or pop warm start configs
if args['warm_start_type'] and args['parent_hpo_jobs']:
request['WarmStartConfig']['WarmStartType'] = args['warm_start_type']
parent_jobs = [n.strip() for n in args['parent_hpo_jobs'].split(',')]
for i in range(len(parent_jobs)):
request['WarmStartConfig']['ParentHyperParameterTuningJobs'].append({'HyperParameterTuningJobName': parent_jobs[i]})
else:
if args['warm_start_type'] or args['parent_hpo_jobs']:
if not args['warm_start_type']:
logging.error('Must specify warm start type as either "IdenticalDataAndAlgorithm" or "TransferLearning".')
if not args['parent_hpo_jobs']:
logging.error("Must specify at least one parent hyperparameter tuning job")
raise Exception('Could not make job request')
request.pop('WarmStartConfig')
enable_spot_instance_support(request['TrainingJobDefinition'], args)
### Update tags
for key, val in args['tags'].items():
request['Tags'].append({'Key': key, 'Value': val})
return request
def create_hyperparameter_tuning_job(client, args):
"""Create a Sagemaker HPO job"""
request = create_hyperparameter_tuning_job_request(args)
try:
job_arn = client.create_hyper_parameter_tuning_job(**request)
hpo_job_name = request['HyperParameterTuningJobName']
logging.info("Created Hyperparameter Training Job with name: " + hpo_job_name)
logging.info("HPO job in SageMaker: https://{}.console.aws.amazon.com/sagemaker/home?region={}#/hyper-tuning-jobs/{}"
.format(args['region'], args['region'], hpo_job_name))
logging.info("CloudWatch logs: https://{}.console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/TrainingJobs;prefix={};streamFilter=typeLogStreamPrefix"
.format(args['region'], args['region'], hpo_job_name))
return hpo_job_name
except ClientError as e:
raise Exception(e.response['Error']['Message'])
def wait_for_hyperparameter_training_job(client, hpo_job_name):
### Wait until the job finishes
while(True):
response = client.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=hpo_job_name)
status = response['HyperParameterTuningJobStatus']
if status == 'Completed':
logging.info("Hyperparameter tuning job ended with status: " + status)
break
if status == 'Failed':
message = response['FailureReason']
logging.error('Hyperparameter tuning failed with the following error: {}'.format(message))
raise Exception('Hyperparameter tuning job failed')
logging.info("Hyperparameter tuning job is still in status: " + status)
time.sleep(30)
def get_best_training_job_and_hyperparameters(client, hpo_job_name):
### Get and return best training job and its hyperparameters, without the objective metric
info = client.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=hpo_job_name)
best_job = info['BestTrainingJob']['TrainingJobName']
training_info = client.describe_training_job(TrainingJobName=best_job)
train_hyperparameters = training_info['HyperParameters']
train_hyperparameters.pop('_tuning_objective_metric')
return best_job, train_hyperparameters
def create_workteam(client, args):
try:
request = create_workteam_request(args)
response = client.create_workteam(**request)
portal = client.describe_workteam(WorkteamName=args['team_name'])['Workteam']['SubDomain']
logging.info("Labeling portal: " + portal)
return response['WorkteamArn']
except ClientError as e:
raise Exception(e.response['Error']['Message'])
def create_workteam_request(args):
### Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_workteam
"""Create a workteam"""
with open(os.path.join(__cwd__, 'workteam.template.yaml'), 'r') as f:
request = yaml.safe_load(f)
request['WorkteamName'] = args['team_name']
request['Description'] = args['description']
if args['sns_topic']:
request['NotificationConfiguration']['NotificationTopicArn'] = args['sns_topic']
else:
request.pop('NotificationConfiguration')
for group in [n.strip() for n in args['user_groups'].split(',')]:
request['MemberDefinitions'].append({'CognitoMemberDefinition': {'UserPool': args['user_pool'], 'UserGroup': group, 'ClientId': args['client_id']}})
for key, val in args['tags'].items():
request['Tags'].append({'Key': key, 'Value': val})
return request
def create_labeling_job_request(args):
### Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_labeling_job
with open(os.path.join(__cwd__, 'gt.template.yaml'), 'r') as f:
request = yaml.safe_load(f)
  # Mappings are extracted from the ARNs listed in https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_labeling_job
algorithm_arn_map = {'us-west-2': '081040173940',
'us-east-1': '432418664414',
'us-east-2': '266458841044',
'eu-west-1': '568282634449',
'ap-northeast-1': '477331159723',
'ap-southeast-1': '454466003867'}
task_map = {'bounding box': 'BoundingBox',
'image classification': 'ImageMultiClass',
'semantic segmentation': 'SemanticSegmentation',
'text classification': 'TextMultiClass'}
auto_labeling_map = {'bounding box': 'object-detection',
'image classification': 'image-classification',
'text classification': 'text-classification'}
task = args['task_type'].lower()
request['LabelingJobName'] = args['job_name'] if args['job_name'] else "LabelingJob-" + strftime("%Y%m%d%H%M%S", gmtime()) + '-' + id_generator()
if args['label_attribute_name']:
name_check = args['label_attribute_name'].split('-')[-1]
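    # The label attribute name must end in "-ref" for semantic segmentation tasks, and must not end in "-ref" or "-metadata" for all other task types.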
if task == 'semantic segmentation' and name_check == 'ref' or task != 'semantic segmentation' and name_check != 'metadata' and name_check != 'ref':
request['LabelAttributeName'] = args['label_attribute_name']
else:
logging.error('Invalid label attribute name. If task type is semantic segmentation, name must end in "-ref". Else, name must not end in "-ref" or "-metadata".')
else:
request['LabelAttributeName'] = args['job_name']
request['InputConfig']['DataSource']['S3DataSource']['ManifestS3Uri'] = args['manifest_location']
request['OutputConfig']['S3OutputPath'] = args['output_location']
request['OutputConfig']['KmsKeyId'] = args['output_encryption_key']
request['RoleArn'] = args['role']
request['LabelCategoryConfigS3Uri'] = args['label_category_config']
### Update or pop stopping conditions
if not args['max_human_labeled_objects'] and not args['max_percent_objects']:
request.pop('StoppingConditions')
else:
if args['max_human_labeled_objects']:
request['StoppingConditions']['MaxHumanLabeledObjectCount'] = args['max_human_labeled_objects']
else:
request['StoppingConditions'].pop('MaxHumanLabeledObjectCount')
if args['max_percent_objects']:
request['StoppingConditions']['MaxPercentageOfInputDatasetLabeled'] = args['max_percent_objects']
else:
request['StoppingConditions'].pop('MaxPercentageOfInputDatasetLabeled')
### Update or pop automatic labeling configs
if args['enable_auto_labeling']:
if task == 'image classification' or task == 'bounding box' or task == 'text classification':
      labeling_algorithm_arn = 'arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/{}'.format(args['region'], auto_labeling_map[task])
request['LabelingJobAlgorithmsConfig']['LabelingJobAlgorithmSpecificationArn'] = labeling_algorithm_arn
if args['initial_model_arn']:
request['LabelingJobAlgorithmsConfig']['InitialActiveLearningModelArn'] = args['initial_model_arn']
else:
request['LabelingJobAlgorithmsConfig'].pop('InitialActiveLearningModelArn')
request['LabelingJobAlgorithmsConfig']['LabelingJobResourceConfig']['VolumeKmsKeyId'] = args['resource_encryption_key']
else:
logging.error("Automated data labeling not available for semantic segmentation or custom algorithms. Proceeding without automated data labeling.")
else:
request.pop('LabelingJobAlgorithmsConfig')
### Update pre-human and annotation consolidation task lambda functions
if task == 'image classification' or task == 'bounding box' or task == 'text classification' or task == 'semantic segmentation':
prehuman_arn = 'arn:aws:lambda:{}:{}:function:PRE-{}'.format(args['region'], algorithm_arn_map[args['region']], task_map[task])
acs_arn = 'arn:aws:lambda:{}:{}:function:ACS-{}'.format(args['region'], algorithm_arn_map[args['region']], task_map[task])
request['HumanTaskConfig']['PreHumanTaskLambdaArn'] = prehuman_arn
request['HumanTaskConfig']['AnnotationConsolidationConfig']['AnnotationConsolidationLambdaArn'] = acs_arn
elif task == 'custom' or task == '':
if args['pre_human_task_function'] and args['post_human_task_function']:
request['HumanTaskConfig']['PreHumanTaskLambdaArn'] = args['pre_human_task_function']
request['HumanTaskConfig']['AnnotationConsolidationConfig']['AnnotationConsolidationLambdaArn'] = args['post_human_task_function']
else:
logging.error("Must specify pre-human task lambda arn and annotation consolidation post-human task lambda arn.")
else:
logging.error("Task type must be Bounding Box, Image Classification, Semantic Segmentation, Text Classification, or Custom.")
request['HumanTaskConfig']['UiConfig']['UiTemplateS3Uri'] = args['ui_template']
request['HumanTaskConfig']['TaskTitle'] = args['title']
request['HumanTaskConfig']['TaskDescription'] = args['description']
request['HumanTaskConfig']['NumberOfHumanWorkersPerDataObject'] = args['num_workers_per_object']
request['HumanTaskConfig']['TaskTimeLimitInSeconds'] = args['time_limit']
if args['task_availibility']:
request['HumanTaskConfig']['TaskAvailabilityLifetimeInSeconds'] = args['task_availibility']
else:
request['HumanTaskConfig'].pop('TaskAvailabilityLifetimeInSeconds')
if args['max_concurrent_tasks']:
request['HumanTaskConfig']['MaxConcurrentTaskCount'] = args['max_concurrent_tasks']
else:
request['HumanTaskConfig'].pop('MaxConcurrentTaskCount')
if args['task_keywords']:
for word in [n.strip() for n in args['task_keywords'].split(',')]:
request['HumanTaskConfig']['TaskKeywords'].append(word)
else:
request['HumanTaskConfig'].pop('TaskKeywords')
### Update worker configurations
if args['worker_type'].lower() == 'public':
if args['no_adult_content']:
request['InputConfig']['DataAttributes']['ContentClassifiers'].append('FreeOfAdultContent')
if args['no_ppi']:
request['InputConfig']['DataAttributes']['ContentClassifiers'].append('FreeOfPersonallyIdentifiableInformation')
request['HumanTaskConfig']['WorkteamArn'] = 'arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default'.format(args['region'])
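    # Split the USD task price (e.g. 0.012) into whole dollars, cents, and tenths of a cent, as required by PublicWorkforceTaskPrice.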
dollars = int(args['workforce_task_price'])
cents = int(100 * (args['workforce_task_price'] - dollars))
tenth_of_cents = int((args['workforce_task_price'] * 1000) - (dollars * 1000) - (cents * 10))
request['HumanTaskConfig']['PublicWorkforceTaskPrice']['AmountInUsd']['Dollars'] = dollars
request['HumanTaskConfig']['PublicWorkforceTaskPrice']['AmountInUsd']['Cents'] = cents
request['HumanTaskConfig']['PublicWorkforceTaskPrice']['AmountInUsd']['TenthFractionsOfACent'] = tenth_of_cents
else:
request['InputConfig'].pop('DataAttributes')
request['HumanTaskConfig']['WorkteamArn'] = args['workteam_arn']
request['HumanTaskConfig'].pop('PublicWorkforceTaskPrice')
for key, val in args['tags'].items():
request['Tags'].append({'Key': key, 'Value': val})
return request
def create_labeling_job(client, args):
"""Create a SageMaker Ground Truth job"""
request = create_labeling_job_request(args)
try:
client.create_labeling_job(**request)
gt_job_name = request['LabelingJobName']
logging.info("Created Ground Truth Labeling Job with name: " + gt_job_name)
logging.info("Ground Truth job in SageMaker: https://{}.console.aws.amazon.com/sagemaker/groundtruth?region={}#/labeling-jobs/details/{}"
.format(args['region'], args['region'], gt_job_name))
return gt_job_name
except ClientError as e:
raise Exception(e.response['Error']['Message'])
def wait_for_labeling_job(client, labeling_job_name):
### Wait until the job finishes
status = 'InProgress'
while(status == 'InProgress'):
response = client.describe_labeling_job(LabelingJobName=labeling_job_name)
status = response['LabelingJobStatus']
if status == 'Failed':
message = response['FailureReason']
logging.info('Labeling failed with the following error: {}'.format(message))
raise Exception('Labeling job failed')
logging.info("Labeling job is still in status: " + status)
time.sleep(30)
if status == 'Completed':
logging.info("Labeling job ended with status: " + status)
else:
raise Exception('Labeling job stopped')
def get_labeling_job_outputs(client, labeling_job_name, auto_labeling):
### Get and return labeling job outputs
info = client.describe_labeling_job(LabelingJobName=labeling_job_name)
output_manifest = info['LabelingJobOutput']['OutputDatasetS3Uri']
if auto_labeling:
active_learning_model_arn = info['LabelingJobOutput']['FinalActiveLearningModelArn']
else:
active_learning_model_arn = ' '
return output_manifest, active_learning_model_arn
def create_hyperparameters(hyperparam_args):
# Validate all values are strings
for key, value in hyperparam_args.items():
if not isinstance(value, str):
raise Exception(f"Could not parse hyperparameters. Value for {key} was not a string.")
return hyperparam_args
def enable_spot_instance_support(training_job_config, args):
if args['spot_instance']:
training_job_config['EnableManagedSpotTraining'] = args['spot_instance']
if args['max_wait_time'] >= training_job_config['StoppingCondition']['MaxRuntimeInSeconds']:
training_job_config['StoppingCondition']['MaxWaitTimeInSeconds'] = args['max_wait_time']
else:
logging.error("Max wait time must be greater than or equal to max run time.")
raise Exception('Could not create job request.')
if args['checkpoint_config'] and 'S3Uri' in args['checkpoint_config']:
training_job_config['CheckpointConfig'] = args['checkpoint_config']
else:
logging.error("EnableManagedSpotTraining requires checkpoint config with an S3 uri.")
raise Exception('Could not create job request.')
else:
# Remove any artifacts that require spot instance support
del training_job_config['StoppingCondition']['MaxWaitTimeInSeconds']
del training_job_config['CheckpointConfig']
def id_generator(size=4, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def yaml_or_json_str(value):
  if value == "" or value is None:
    return None
  try:
    return json.loads(value)
  except ValueError:
    return yaml.safe_load(value)
def str_to_bool(value):
  # This distutils function returns an integer representation of the boolean
  # rather than a True/False value. This simply hard casts it.
  return bool(strtobool(value))
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/common/hpo.template.yaml | HyperParameterTuningJobName: ''
HyperParameterTuningJobConfig:
Strategy: ''
HyperParameterTuningJobObjective:
Type: ''
MetricName: ''
ResourceLimits:
MaxNumberOfTrainingJobs: 0
MaxParallelTrainingJobs: 0
ParameterRanges:
IntegerParameterRanges: []
ContinuousParameterRanges: []
CategoricalParameterRanges: []
TrainingJobEarlyStoppingType: ''
TrainingJobDefinition:
StaticHyperParameters: {}
AlgorithmSpecification:
TrainingImage: ''
TrainingInputMode: ''
AlgorithmName: ''
MetricDefinitions: []
RoleArn: ''
InputDataConfig: []
VpcConfig:
SecurityGroupIds: []
Subnets: []
OutputDataConfig:
KmsKeyId: ''
S3OutputPath: ''
ResourceConfig:
InstanceType: ''
InstanceCount: 1
VolumeSizeInGB: 1
VolumeKmsKeyId: ''
StoppingCondition:
MaxRuntimeInSeconds: 86400
MaxWaitTimeInSeconds: 86400
CheckpointConfig:
S3Uri: ''
LocalPath: ''
EnableNetworkIsolation: True
EnableInterContainerTrafficEncryption: False
EnableManagedSpotTraining: False
WarmStartConfig:
ParentHyperParameterTuningJobs: []
WarmStartType: ''
Tags: []
| 8,392 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/train/README.md | # SageMaker Training Kubeflow Pipelines component
## Summary
Component to submit SageMaker Training jobs directly from a Kubeflow Pipelines workflow.
https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
# Details
## Intended Use
For model training using AWS SageMaker.
## Runtime Arguments
Argument | Description | Optional | Data type | Accepted values | Default |
:--- | :---------- | :----------| :----------| :---------- | :----------|
region | The region where the training job launches | No | String | | |
endpoint_url | The endpoint URL for the private link VPC endpoint. | Yes | String | | |
job_name | The name of the training job. Must be unique within the same AWS account and AWS region | Yes | String | | TrainingJob-[datetime]-[random id]|
role | The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf | No | String | | |
image | The registry path of the Docker image that contains the training algorithm | Yes | String | | |
algorithm_name | The name of the algorithm resource to use for the training job; only specify this parameter if training image is not specified | Yes | String | | |
metric_definitions | The dictionary of name-regex pairs specify the metrics that the algorithm emits | Yes | Dict | | {} |
training_input_mode | The input mode that the algorithm supports | No | String | File, Pipe | File |
hyperparameters | Hyperparameters for the selected algorithm | No | Dict | [Depends on Algo](https://docs.aws.amazon.com/sagemaker/latest/dg/k-means-api-config.html)| |
channels | A list of dicts specifying the input channels (at least one); refer to [documentation](https://github.com/awsdocs/amazon-sagemaker-developer-guide/blob/master/doc_source/API_Channel.md) for parameters | No | List of Dicts | | |
instance_type | The ML compute instance type | Yes | String | ml.m4.xlarge, ml.m4.2xlarge, ml.m4.4xlarge, ml.m4.10xlarge, ml.m4.16xlarge, ml.m5.large, ml.m5.xlarge, ml.m5.2xlarge, ml.m5.4xlarge, ml.m5.12xlarge, ml.m5.24xlarge, ml.c4.xlarge, ml.c4.2xlarge, ml.c4.4xlarge, ml.c4.8xlarge, ml.p2.xlarge, ml.p2.8xlarge, ml.p2.16xlarge, ml.p3.2xlarge, ml.p3.8xlarge, ml.p3.16xlarge, ml.c5.xlarge, ml.c5.2xlarge, ml.c5.4xlarge, ml.c5.9xlarge, ml.c5.18xlarge | ml.m4.xlarge |
instance_count | The number of ML compute instances to use in each training job | Yes | Int | ≥ 1 | 1 |
volume_size | The size of the ML storage volume that you want to provision in GB | Yes | Int | ≥ 1 | 30 |
resource_encryption_key | The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) | Yes | String | | |
max_run_time | The maximum run time in seconds per training job | Yes | Int | ≤ 432000 (5 days) | 86400 (1 day) |
model_artifact_path | Identifies the S3 path where you want Amazon SageMaker to store the model artifacts | No | String | | |
output_encryption_key | The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts | Yes | String | | |
vpc_security_group_ids | A comma-delimited list of security group IDs, in the form sg-xxxxxxxx | Yes | String | | |
vpc_subnets | A comma-delimited list of subnet IDs in the VPC to which you want to connect your training job | Yes | String | | |
network_isolation | Isolates the training container if true | No | Boolean | False, True | True |
traffic_encryption | Encrypts all communications between ML compute instances in distributed training if true | No | Boolean | False, True | False |
spot_instance | Use managed spot training if true | No | Boolean | False, True | False |
max_wait_time | The maximum time in seconds you are willing to wait for a managed spot training job to complete | Yes | Int | ≤ 432000 (5 days) | 86400 (1 day) |
checkpoint_config | Dictionary of information about the output location for managed spot training checkpoint data | Yes | Dict | | {} |
tags | Key-value pairs to categorize AWS resources | Yes | Dict | | {} |
## Output
Stores the trained model artifacts at the S3 path given in `model_artifact_path`, and outputs the model artifact URL, the training job name, and the training image used.
# Example code
Simple example pipeline with only the Train component: [simple_train_pipeline](https://github.com/kubeflow/pipelines/tree/documents/samples/contrib/aws-samples/simple_train_pipeline)
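A rough sketch of how this component can be wired into a Kubeflow pipeline is shown below. The component file path, S3 locations, bucket name, and IAM role ARN are placeholders rather than values from this repository; adjust them to your environment.
```python
import json
import kfp
from kfp import aws, components, dsl

# Hypothetical local path; point this at the train/component.yaml file described above.
sagemaker_train_op = components.load_component_from_file('train/component.yaml')

@dsl.pipeline(name='sagemaker-train-sketch', description='Minimal SageMaker training job example')
def train_pipeline():
    train_step = sagemaker_train_op(
        region='us-west-2',
        algorithm_name='K-Means',  # resolved to the built-in algorithm image for the region
        hyperparameters=json.dumps({'k': '10', 'feature_dim': '784', 'mini_batch_size': '500'}),
        channels=json.dumps([{
            'ChannelName': 'train',
            'DataSource': {'S3DataSource': {
                'S3Uri': 's3://my-bucket/mnist_kmeans_example/data',  # placeholder input data
                'S3DataType': 'S3Prefix',
                'S3DataDistributionType': 'FullyReplicated',
            }},
            'ContentType': '',
            'CompressionType': 'None',
            'RecordWrapperType': 'None',
            'InputMode': 'File',
        }]),
        instance_type='ml.m4.xlarge',
        instance_count=1,
        volume_size=30,
        max_run_time=3600,
        model_artifact_path='s3://my-bucket/mnist_kmeans_example/output',  # placeholder output path
        role='arn:aws:iam::123456789012:role/SageMakerExecutionRole',      # placeholder execution role
    )
    # Mount the Kubernetes secret holding AWS credentials into the component container.
    train_step.apply(aws.use_aws_secret('aws-secret', 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'))

if __name__ == '__main__':
    kfp.compiler.Compiler().compile(train_pipeline, 'train_pipeline.zip')
```
The `model_artifact_url`, `job_name`, and `training_image` outputs of the step can then be passed to downstream components such as model creation or deployment.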
# Resources
* [Using Amazon built-in algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html)
| 8,393 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/train/component.yaml | name: 'Sagemaker - Training Job'
description: |
Train Machine Learning and Deep Learning Models using SageMaker
inputs:
- name: region
description: 'The region where the training job launches.'
type: String
- name: job_name
description: 'The name of the batch training job.'
default: ''
type: String
- name: role
description: 'The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf.'
type: String
- name: image
description: 'The registry path of the Docker image that contains the training algorithm.'
default: ''
type: String
- name: algorithm_name
description: 'The name of the algorithm resource to use for the training job. Do not specify a value for this if using training image.'
default: ''
type: String
- name: metric_definitions
description: 'The dictionary of name-regex pairs specify the metrics that the algorithm emits.'
default: '{}'
type: JsonObject
- name: training_input_mode
description: 'The input mode that the algorithm supports. File or Pipe.'
default: 'File'
type: String
- name: hyperparameters
  description: 'Dictionary of hyperparameters for the algorithm.'
default: '{}'
type: JsonObject
- name: channels
description: 'A list of dicts specifying the input channels. Must have at least one.'
type: JsonArray
- name: instance_type
description: 'The ML compute instance type.'
default: 'ml.m4.xlarge'
type: String
- name: instance_count
description: 'The number of ML compute instances to use in each training job.'
default: '1'
type: Integer
- name: volume_size
description: 'The size of the ML storage volume that you want to provision.'
default: '30'
type: Integer
- name: resource_encryption_key
description: 'The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).'
default: ''
type: String
- name: max_run_time
description: 'The maximum run time in seconds for the training job.'
default: '86400'
type: Integer
- name: model_artifact_path
description: 'Identifies the S3 path where you want Amazon SageMaker to store the model artifacts.'
type: String
- name: output_encryption_key
description: 'The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.'
default: ''
type: String
- name: vpc_security_group_ids
description: 'The VPC security group IDs, in the form sg-xxxxxxxx.'
default: ''
type: String
- name: vpc_subnets
description: 'The ID of the subnets in the VPC to which you want to connect your hpo job.'
default: ''
type: String
- name: network_isolation
description: 'Isolates the training container.'
default: 'True'
type: Bool
- name: traffic_encryption
description: 'Encrypts all communications between ML compute instances in distributed training.'
default: 'False'
type: Bool
- name: spot_instance
description: 'Use managed spot training.'
default: 'False'
type: Bool
- name: max_wait_time
description: 'The maximum time in seconds you are willing to wait for a managed spot training job to complete.'
default: '86400'
type: Integer
- name: checkpoint_config
description: 'Dictionary of information about the output location for managed spot training checkpoint data.'
default: '{}'
type: JsonObject
- name: endpoint_url
description: 'The endpoint URL for the private link VPC endpoint.'
default: ''
type: String
- name: tags
description: 'Key-value pairs, to categorize AWS resources.'
default: '{}'
type: JsonObject
outputs:
- {name: model_artifact_url, description: 'Model artifacts url'}
- {name: job_name, description: 'Training job name'}
- {name: training_image, description: 'The registry path of the Docker image that contains the training algorithm'}
implementation:
container:
image: amazon/aws-sagemaker-kfp-components:0.3.1
command: ['python3']
args: [
train.py,
--region, {inputValue: region},
--endpoint_url, {inputValue: endpoint_url},
--job_name, {inputValue: job_name},
--role, {inputValue: role},
--image, {inputValue: image},
--algorithm_name, {inputValue: algorithm_name},
--metric_definitions, {inputValue: metric_definitions},
--training_input_mode, {inputValue: training_input_mode},
--hyperparameters, {inputValue: hyperparameters},
--channels, {inputValue: channels},
--instance_type, {inputValue: instance_type},
--instance_count, {inputValue: instance_count},
--volume_size, {inputValue: volume_size},
--resource_encryption_key, {inputValue: resource_encryption_key},
--max_run_time, {inputValue: max_run_time},
--model_artifact_path, {inputValue: model_artifact_path},
--output_encryption_key, {inputValue: output_encryption_key},
--vpc_security_group_ids, {inputValue: vpc_security_group_ids},
--vpc_subnets, {inputValue: vpc_subnets},
--network_isolation, {inputValue: network_isolation},
--traffic_encryption, {inputValue: traffic_encryption},
--spot_instance, {inputValue: spot_instance},
--max_wait_time, {inputValue: max_wait_time},
--checkpoint_config, {inputValue: checkpoint_config},
--tags, {inputValue: tags}
]
fileOutputs:
model_artifact_url: /tmp/model_artifact_url.txt
job_name: /tmp/job_name.txt
training_image: /tmp/training_image.txt
| 8,394 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/train | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/train/src/train.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import logging
from common import _utils
def create_parser():
parser = argparse.ArgumentParser(description='SageMaker Training Job')
_utils.add_default_client_arguments(parser)
parser.add_argument('--job_name', type=str, required=False, help='The name of the training job.', default='')
parser.add_argument('--role', type=str, required=True, help='The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf.')
parser.add_argument('--image', type=str, required=False, help='The registry path of the Docker image that contains the training algorithm.', default='')
parser.add_argument('--algorithm_name', type=str, required=False, help='The name of the resource algorithm to use for the training job.', default='')
parser.add_argument('--metric_definitions', type=_utils.yaml_or_json_str, required=False, help='The dictionary of name-regex pairs specify the metrics that the algorithm emits.', default={})
parser.add_argument('--training_input_mode', choices=['File', 'Pipe'], type=str, help='The input mode that the algorithm supports. File or Pipe.', default='File')
  parser.add_argument('--hyperparameters', type=_utils.yaml_or_json_str, help='Dictionary of hyperparameters for the algorithm.', default={})
parser.add_argument('--channels', type=_utils.yaml_or_json_str, required=True, help='A list of dicts specifying the input channels. Must have at least one.')
parser.add_argument('--instance_type', required=True, choices=['ml.m4.xlarge', 'ml.m4.2xlarge', 'ml.m4.4xlarge', 'ml.m4.10xlarge', 'ml.m4.16xlarge', 'ml.m5.large', 'ml.m5.xlarge', 'ml.m5.2xlarge', 'ml.m5.4xlarge',
'ml.m5.12xlarge', 'ml.m5.24xlarge', 'ml.c4.xlarge', 'ml.c4.2xlarge', 'ml.c4.4xlarge', 'ml.c4.8xlarge', 'ml.p2.xlarge', 'ml.p2.8xlarge', 'ml.p2.16xlarge', 'ml.p3.2xlarge', 'ml.p3.8xlarge', 'ml.p3.16xlarge',
'ml.c5.xlarge', 'ml.c5.2xlarge', 'ml.c5.4xlarge', 'ml.c5.9xlarge', 'ml.c5.18xlarge'], type=str, help='The ML compute instance type.', default='ml.m4.xlarge')
  parser.add_argument('--instance_count', required=True, type=int, help='The number of ML compute instances to use in each training job.', default=1)
parser.add_argument('--volume_size', type=int, required=True, help='The size of the ML storage volume that you want to provision.', default=1)
parser.add_argument('--resource_encryption_key', type=str, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--max_run_time', type=int, required=True, help='The maximum run time in seconds for the training job.', default=86400)
parser.add_argument('--model_artifact_path', type=str, required=True, help='Identifies the S3 path where you want Amazon SageMaker to store the model artifacts.')
parser.add_argument('--output_encryption_key', type=str, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.', default='')
parser.add_argument('--vpc_security_group_ids', type=str, required=False, help='The VPC security group IDs, in the form sg-xxxxxxxx.')
parser.add_argument('--vpc_subnets', type=str, required=False, help='The ID of the subnets in the VPC to which you want to connect your hpo job.')
parser.add_argument('--network_isolation', type=_utils.str_to_bool, required=False, help='Isolates the training container.', default=True)
parser.add_argument('--traffic_encryption', type=_utils.str_to_bool, required=False, help='Encrypts all communications between ML compute instances in distributed training.', default=False)
### Start spot instance support
parser.add_argument('--spot_instance', type=_utils.str_to_bool, required=False, help='Use managed spot training.', default=False)
parser.add_argument('--max_wait_time', type=int, required=False, help='The maximum time in seconds you are willing to wait for a managed spot training job to complete.', default=86400)
parser.add_argument('--checkpoint_config', type=_utils.yaml_or_json_str, required=False, help='Dictionary of information about the output location for managed spot training checkpoint data.', default={})
### End spot instance support
  parser.add_argument('--tags', type=_utils.yaml_or_json_str, required=False, help='An array of key-value pairs used to categorize AWS resources.', default={})
  return parser


def main(argv=None):
parser = create_parser()
args = parser.parse_args(argv)
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_sagemaker_client(args.region, args.endpoint_url)
logging.info('Submitting Training Job to SageMaker...')
job_name = _utils.create_training_job(client, vars(args))
logging.info('Job request submitted. Waiting for completion...')
_utils.wait_for_training_job(client, job_name)
image = _utils.get_image_from_job(client, job_name)
model_artifact_url = _utils.get_model_artifacts_from_job(client, job_name)
  logging.info('Got model artifacts %s from training job %s.', model_artifact_url, job_name)
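  # These files are written for downstream pipeline steps; the KFP component spec
  # presumably maps them to the component's output parameters.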
with open('/tmp/model_artifact_url.txt', 'w') as f:
f.write(model_artifact_url)
with open('/tmp/job_name.txt', 'w') as f:
f.write(job_name)
with open('/tmp/training_image.txt', 'w') as f:
f.write(image)
  logging.info('Job completed.')


if __name__ == "__main__":
main(sys.argv[1:])
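
# A hypothetical local invocation for quick manual testing; every value below is a
# placeholder rather than a default of this component, the script name train.py is
# assumed, and arguments such as --region that are defined earlier in the parser
# are expected to be supplied as well:
#
#   python train.py \
#     --region us-west-2 \
#     --role arn:aws:iam::111122223333:role/SageMakerExecutionRole \
#     --image 111122223333.dkr.ecr.us-west-2.amazonaws.com/my-algorithm:latest \
#     --channels '[{"ChannelName": "train", "DataSource": {"S3DataSource": {"S3Uri": "s3://my-bucket/train-data", "S3DataType": "S3Prefix"}}, "InputMode": "File"}]' \
#     --instance_type ml.m4.xlarge \
#     --instance_count 1 \
#     --volume_size 50 \
#     --max_run_time 3600 \
#     --model_artifact_path s3://my-bucket/models/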
| 8,395 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/tests | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/tests/integration_tests/README.md | ## Requirements
1. [Conda](https://docs.conda.io/en/latest/miniconda.html)
1. [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
1. Argo CLI: [Mac](https://github.com/argoproj/homebrew-tap), [Linux](https://eksworkshop.com/advanced/410_batch/install/)
1. K8s cluster with Kubeflow pipelines > 0.4.0 installed
1. [IAM Role](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) with the SageMakerFullAccess and S3FullAccess policies attached
1. IAM User credentials with SageMakerFullAccess permissions
## Creating S3 buckets with datasets
Change the bucket name in the Python script [`s3_sample_data_creator.py`](https://github.com/kubeflow/pipelines/tree/master/samples/contrib/aws-samples/mnist-kmeans-sagemaker#the-sample-dataset) and run it to create an S3 bucket with the MNIST dataset in the region where you want to run the tests.
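If you prefer to create the bucket yourself before uploading the sample data, a minimal boto3 sketch is shown below; the bucket name and region are placeholders, and the exact data layout the tests expect comes from the linked sample script:
```python
import boto3

region = "us-west-2"            # region where the tests will run
bucket = "my-kfp-test-bucket"   # placeholder; must be globally unique

s3 = boto3.client("s3", region_name=region)
# Note: for us-east-1, omit CreateBucketConfiguration entirely.
s3.create_bucket(
    Bucket=bucket,
    CreateBucketConfiguration={"LocationConstraint": region},
)
```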
## Steps to run integration tests
1. Configure AWS credentials with access to the EKS cluster
1. Fetch the kubeconfig to `~/.kube/config` or set the `KUBECONFIG` environment variable to point to the kubeconfig of the cluster
1. Create a [secret](https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/) named `aws-secret` in the kubeflow namespace with the credentials of an IAM user that has SageMakerFullAccess
```yaml
apiVersion: v1
kind: Secret
metadata:
name: aws-secret
namespace: kubeflow
type: Opaque
data:
AWS_ACCESS_KEY_ID: YOUR_BASE64_ACCESS_KEY
AWS_SECRET_ACCESS_KEY: YOUR_BASE64_SECRET_ACCESS
```
> Note: To get the base64-encoded string, run `echo -n $AWS_ACCESS_KEY_ID | base64`
1. Create a conda environment from `environment.yml` for running the tests: `conda env create -f environment.yml`
1. Activate the conda environment `conda activate kfp_test_env`
1. Run port-forward to the minio service in the background, for example: `kubectl port-forward svc/minio-service 9000:9000 -n kubeflow &` (the tests read pipeline artifacts through this forwarded port; see the sketch after these steps)
1. Provide the following arguments to pytest:
    1. `region`: AWS region where the tests will run. Default - us-west-2
    1. `role-arn`: SageMaker execution IAM role ARN
    1. `s3-data-bucket`: Regional S3 bucket in which the test data is hosted
    1. `minio-service-port`: Localhost port to which the minio service is mapped. Default - 9000
    1. `kfp-namespace`: Cluster namespace where Kubeflow Pipelines is installed. Default - kubeflow
1. cd into this directory and run
```
pytest --region <> --role-arn <> --s3-data-bucket <> --minio-service-port <> --kfp-namespace <>
```
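For reference, the tests read Kubeflow Pipelines artifacts through the minio port forwarded earlier; a minimal sketch of that access pattern is below (the credentials are the stock Kubeflow Pipelines minio defaults, and the bucket/object names are illustrative only):
```python
from minio import Minio

# Stock KFP minio credentials; adjust if your installation overrides them.
client = Minio("localhost:9000", access_key="minio", secret_key="minio123", secure=False)
artifact = client.get_object("mlpipeline", "artifacts/my-run/my-step/output.tgz")
print(len(artifact.read()))
```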
| 8,396 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/tests | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/tests/integration_tests/pytest.ini | [pytest]
addopts = -rA
markers =
canary_test: test to be run as part of canaries. | 8,397 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/tests | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/tests/integration_tests/environment.yml | name: kfp_test_env
channels:
- conda-forge
- defaults
dependencies:
- python=3.7.*
- pip=20.0.*
- awscli=1.18.*
- boto3=1.12.*
- pytest=5.*
- pytest-xdist=1.31.*
- pyyaml=5.3.*
- flake8=3.7.*
- flake8-black=0.1.*
- pip:
- kubernetes==11.0.*
- kfp==0.5.*
- minio==5.0.10
- sagemaker==1.56.*
| 8,398 |
0 | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/tests | kubeflow_public_repos/kfp-tekton-backend/components/aws/sagemaker/tests/integration_tests/.flake8 | [flake8]
max-line-length = 120
extend-ignore =
# See https://github.com/PyCQA/pycodestyle/issues/373
E203, | 8,399 |