index (int64) | repo_id (string) | file_path (string) | content (string) | __index_level_0__ (int64) |
---|---|---|---|---|
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/proto/template_metadata_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE
# Protobuf Python Version: 0.20240806.0
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x13template_metadata.proto\x12\x11template_metadata\x1a\x1cgoogle/protobuf/struct.proto"\x89\x01\n\x10TemplateMetadata\x12\x32\n\x0bio_metadata\x18\x01'
b' \x01(\x0b\x32\x1d.template_metadata.IOMetadata\x12\x41\n\x15preflight_validations\x18\x02'
b' \x01(\x0b\x32".template_metadata.ValidationItems"L\n\nIOMetadata\x12&\n\x05pages\x18\x01'
b' \x03(\x0b\x32\x17.template_metadata.Page\x12\x16\n\x0eschema_version\x18\x02'
b' \x01(\t"W\n\x04Page\x12\x0c\n\x04name\x18\x01'
b' \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02'
b' \x01(\t\x12,\n\x08sections\x18\x03'
b' \x03(\x0b\x32\x1a.template_metadata.Section"V\n\x07Section\x12\x0c\n\x04name\x18\x01'
b' \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02'
b' \x01(\t\x12(\n\x06inputs\x18\x03'
b' \x03(\x0b\x32\x18.template_metadata.Input"\xa8\x01\n\x05Input\x12\x0c\n\x04name\x18\x01'
b' \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02'
b' \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03'
b' \x01(\t\x12\x1b\n\x13\x64\x65\x66\x61ult_explanation\x18\x04'
b' \x01(\t\x12\x11\n\thelp_text\x18\x05'
b' \x01(\t\x12\x36\n\rsemantic_type\x18\x06'
b' \x01(\x0b\x32\x1f.template_metadata.SemanticType"\xf6\x02\n\x0cSemanticType\x12.\n\nfloat_type\x18\x01'
b' \x01(\x0b\x32\x18.template_metadata.FloatH\x00\x12\x32\n\x0cinteger_type\x18\x02'
b' \x01(\x0b\x32\x1a.template_metadata.IntegerH\x00\x12\x30\n\x0bstring_type\x18\x03'
b' \x01(\x0b\x32\x19.template_metadata.StringH\x00\x12\x32\n\x0c\x62oolean_type\x18\x04'
b' \x01(\x0b\x32\x1a.template_metadata.BooleanH\x00\x12,\n\tlist_type\x18\x06'
b' \x01(\x0b\x32\x17.template_metadata.ListH\x00\x12\x30\n\x0bstruct_type\x18\x07'
b' \x01(\x0b\x32\x19.template_metadata.StructH\x00\x12\x34\n\rartifact_type\x18\x08'
b' \x01(\x0b\x32\x1b.template_metadata.ArtifactH\x00\x42\x06\n\x04type";\n\x05\x46loat\x12\x0b\n\x03min\x18\x01'
b' \x01(\x02\x12\x0b\n\x03max\x18\x02'
b' \x01(\x02\x12\x18\n\x10validation_error\x18\x03'
b' \x01(\t"=\n\x07Integer\x12\x0b\n\x03min\x18\x01'
b' \x01(\x05\x12\x0b\n\x03max\x18\x02'
b' \x01(\x05\x12\x18\n\x10validation_error\x18\x03'
b' \x01(\t"\xa6\x01\n\x06String\x12\x30\n\tfree_form\x18\x01'
b' \x01(\x0b\x32\x1b.template_metadata.FreeFormH\x00\x12\x32\n\nselect_one\x18\x02'
b' \x01(\x0b\x32\x1c.template_metadata.SelectOneH\x00\x12.\n\x08uri_type\x18\x03'
b' \x01(\x0e\x32\x1a.template_metadata.UriTypeH\x00\x42\x06\n\x04type"\t\n\x07\x42oolean"\xa6\x01\n\x04List\x12\x30\n\tfree_form\x18\x01'
b' \x01(\x0b\x32\x1b.template_metadata.FreeFormH\x00\x12\x34\n\x0bselect_many\x18\x02'
b' \x01(\x0b\x32\x1d.template_metadata.SelectManyH\x00\x12.\n\x08uri_type\x18\x03'
b' \x01(\x0e\x32\x1a.template_metadata.UriTypeH\x00\x42\x06\n\x04type"\x08\n\x06Struct"M\n\x08\x41rtifact\x12\'\n\x03uri\x18\x01'
b' \x01(\x0e\x32\x1a.template_metadata.UriType\x12\x18\n\x10validation_error\x18\x02'
b' \x01(\t"\x90\x01\n\x08\x46reeForm\x12%\n\x04size\x18\x01'
b' \x01(\x0e\x32\x17.template_metadata.Size\x12\r\n\x05regex\x18\x02'
b' \x01(\t\x12\x34\n\x0c\x63ontent_type\x18\x03'
b' \x01(\x0e\x32\x1e.template_metadata.ContentType\x12\x18\n\x10validation_error\x18\x04'
b' \x01(\t"\xbe\x01\n\tSelectOne\x12-\n\x07options\x18\x01'
b' \x01(\x0b\x32\x1a.template_metadata.OptionsH\x00\x12/\n\x08location\x18\x02'
b' \x01(\x0b\x32\x1b.template_metadata.LocationH\x00\x12\x11\n\x07project\x18\x03'
b' \x01(\x08H\x00\x12\x36\n\x0cmachine_type\x18\x04'
b' \x01(\x0b\x32\x1e.template_metadata.MachineTypeH\x00\x42\x06\n\x04type"K\n\nSelectMany\x12+\n\x07options\x18\x01'
b' \x01(\x0b\x32\x1a.template_metadata.Options\x12\x10\n\x08select_n\x18\x02'
b' \x01(\x05"R\n\x08Location\x12\r\n\x03\x61ny\x18\x01'
b' \x01(\x08H\x00\x12-\n\x07options\x18\x02'
b' \x01(\x0b\x32\x1a.template_metadata.OptionsH\x00\x42\x08\n\x06values"U\n\x0bMachineType\x12\r\n\x03\x61ny\x18\x01'
b' \x01(\x08H\x00\x12-\n\x07options\x18\x02'
b' \x01(\x0b\x32\x1a.template_metadata.OptionsH\x00\x42\x08\n\x06values"1\n\x07Options\x12&\n\x06values\x18\x01'
b' \x03(\x0b\x32\x16.google.protobuf.Value"\xcc\x02\n\x0fValidationItems\x12N\n\x0esa_validations\x18\x01'
b' \x03(\x0b\x32\x36.template_metadata.GoogleCloudServiceAccountValidation\x12O\n\x11quota_validations\x18\x02'
b' \x03(\x0b\x32\x34.template_metadata.GoogleCloudProjectQuotaValidation\x12N\n\x0f\x61pi_validations\x18\x03'
b' \x03(\x0b\x32\x35.template_metadata.GoogleCloudApiEnablementValidation\x12H\n\x0fgcs_validations\x18\x04'
b' \x03(\x0b\x32/.template_metadata.GoogleCloudStorageValidation"\x92\x01\n\x1cGoogleCloudStorageValidation\x12\x0f\n\x07gcs_uri\x18\x01'
b' \x01(\t\x12\x10\n\x08is_input\x18\x02'
b' \x01(\x08\x12\x1f\n\x17\x64\x65\x66\x61ult_service_account\x18\x03'
b' \x01(\t\x12\x1c\n\x14override_placeholder\x18\x04'
b' \x01(\t\x12\x10\n\x08gcs_uris\x18\x05'
b' \x03(\t"\x80\x01\n!GoogleCloudProjectQuotaValidation\x12\x13\n\x0bmetric_name\x18\x01'
b' \x01(\t\x12\x15\n\x0bint64_value\x18\x02'
b' \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03'
b' \x01(\x01H\x00\x12\x0e\n\x06region\x18\x04'
b' \x01(\tB\x07\n\x05value"\x8d\x01\n#GoogleCloudServiceAccountValidation\x12\x1f\n\x17\x64\x65\x66\x61ult_principal_email\x18\x01'
b' \x01(\t\x12\x1c\n\x14override_placeholder\x18\x02'
b' \x01(\t\x12\x13\n\x0bpermissions\x18\x03'
b' \x03(\t\x12\x12\n\nrole_names\x18\x04'
b' \x03(\t";\n"GoogleCloudApiEnablementValidation\x12\x15\n\rservice_names\x18\x01'
b' \x03(\t*G\n\x04Size\x12\x0e\n\nSIZE_UNSET\x10\x00\x12\x0e\n\nSIZE_SMALL\x10\x01\x12\x0f\n\x0bSIZE_MEDIUM\x10\x02\x12\x0e\n\nSIZE_LARGE\x10\x03*\x82\x01\n\x0b\x43ontentType\x12\x11\n\rUNSET_CONTENT\x10\x00\x12\x10\n\x0cYAML_CONTENT\x10\x01\x12\x10\n\x0cJSON_CONTENT\x10\x02\x12\x14\n\x10MARKDOWN_CONTENT\x10\x03\x12\x10\n\x0cHTML_CONTENT\x10\x04\x12\x14\n\x10\x44\x41TETIME_CONTENT\x10\x05*a\n\x07UriType\x12\x0b\n\x07\x41NY_URI\x10\x00\x12\x0f\n\x0bGCS_ANY_URI\x10\x01\x12\x12\n\x0eGCS_BUCKET_URI\x10\x02\x12\x12\n\x0eGCS_OBJECT_URI\x10\x03\x12\x10\n\x0c\x42IGQUERY_URI\x10\x04\x42\x02P\x01\x62\x06proto3'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR,
'google_cloud_pipeline_components.google_cloud_pipeline_components.proto.template_metadata_pb2',
_globals,
)
if not _descriptor._USE_C_DESCRIPTORS:
_globals['DESCRIPTOR']._loaded_options = None
_globals['DESCRIPTOR']._serialized_options = b'P\001'
_globals['_SIZE']._serialized_start = 3127
_globals['_SIZE']._serialized_end = 3198
_globals['_CONTENTTYPE']._serialized_start = 3201
_globals['_CONTENTTYPE']._serialized_end = 3331
_globals['_URITYPE']._serialized_start = 3333
_globals['_URITYPE']._serialized_end = 3430
_globals['_TEMPLATEMETADATA']._serialized_start = 164
_globals['_TEMPLATEMETADATA']._serialized_end = 301
_globals['_IOMETADATA']._serialized_start = 303
_globals['_IOMETADATA']._serialized_end = 379
_globals['_PAGE']._serialized_start = 381
_globals['_PAGE']._serialized_end = 468
_globals['_SECTION']._serialized_start = 470
_globals['_SECTION']._serialized_end = 556
_globals['_INPUT']._serialized_start = 559
_globals['_INPUT']._serialized_end = 727
_globals['_SEMANTICTYPE']._serialized_start = 730
_globals['_SEMANTICTYPE']._serialized_end = 1104
_globals['_FLOAT']._serialized_start = 1106
_globals['_FLOAT']._serialized_end = 1165
_globals['_INTEGER']._serialized_start = 1167
_globals['_INTEGER']._serialized_end = 1228
_globals['_STRING']._serialized_start = 1231
_globals['_STRING']._serialized_end = 1397
_globals['_BOOLEAN']._serialized_start = 1399
_globals['_BOOLEAN']._serialized_end = 1408
_globals['_LIST']._serialized_start = 1411
_globals['_LIST']._serialized_end = 1577
_globals['_STRUCT']._serialized_start = 1579
_globals['_STRUCT']._serialized_end = 1587
_globals['_ARTIFACT']._serialized_start = 1589
_globals['_ARTIFACT']._serialized_end = 1666
_globals['_FREEFORM']._serialized_start = 1669
_globals['_FREEFORM']._serialized_end = 1813
_globals['_SELECTONE']._serialized_start = 1816
_globals['_SELECTONE']._serialized_end = 2006
_globals['_SELECTMANY']._serialized_start = 2008
_globals['_SELECTMANY']._serialized_end = 2083
_globals['_LOCATION']._serialized_start = 2085
_globals['_LOCATION']._serialized_end = 2167
_globals['_MACHINETYPE']._serialized_start = 2169
_globals['_MACHINETYPE']._serialized_end = 2254
_globals['_OPTIONS']._serialized_start = 2256
_globals['_OPTIONS']._serialized_end = 2305
_globals['_VALIDATIONITEMS']._serialized_start = 2308
_globals['_VALIDATIONITEMS']._serialized_end = 2640
_globals['_GOOGLECLOUDSTORAGEVALIDATION']._serialized_start = 2643
_globals['_GOOGLECLOUDSTORAGEVALIDATION']._serialized_end = 2789
_globals['_GOOGLECLOUDPROJECTQUOTAVALIDATION']._serialized_start = 2792
_globals['_GOOGLECLOUDPROJECTQUOTAVALIDATION']._serialized_end = 2920
_globals['_GOOGLECLOUDSERVICEACCOUNTVALIDATION']._serialized_start = 2923
_globals['_GOOGLECLOUDSERVICEACCOUNTVALIDATION']._serialized_end = 3064
_globals['_GOOGLECLOUDAPIENABLEMENTVALIDATION']._serialized_start = 3066
_globals['_GOOGLECLOUDAPIENABLEMENTVALIDATION']._serialized_end = 3125
# @@protoc_insertion_point(module_scope)
| 800 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/proto/task_error.proto | syntax = "proto3";
package task_error;
// The message allows the 1st party clients of Vertex Pipeline to specify
// arbitrary error messages they want to catch during the execution of the
// pipeline.
message TaskError {
// The primary error message.
string error_message = 1;
}
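
// Illustrative sketch, not part of the schema: a launcher that catches a task
// failure could surface this message rendered as JSON, e.g.
//   {"error_message": "BigQuery source table not found."}
// The example value and the exact serialization are assumptions, not taken
// from this repository.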
| 801 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/proto/gcp_resources_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: gcp_resources.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13gcp_resources.proto\x12\x0cgcp_launcher\x1a\x17google/rpc/status.proto\"\xe0\x01\n\x0cGcpResources\x12\x36\n\tresources\x18\x01 \x03(\x0b\x32#.gcp_launcher.GcpResources.Resource\x1a\x97\x01\n\x08Resource\x12\x1a\n\rresource_type\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x19\n\x0cresource_uri\x18\x02 \x01(\tH\x01\x88\x01\x01\x12!\n\x05\x65rror\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12\x0e\n\x06labels\x18\x04 \x03(\tB\x10\n\x0e_resource_typeB\x0f\n\r_resource_urib\x06proto3')
_GCPRESOURCES = DESCRIPTOR.message_types_by_name['GcpResources']
_GCPRESOURCES_RESOURCE = _GCPRESOURCES.nested_types_by_name['Resource']
GcpResources = _reflection.GeneratedProtocolMessageType('GcpResources', (_message.Message,), {
'Resource' : _reflection.GeneratedProtocolMessageType('Resource', (_message.Message,), {
'DESCRIPTOR' : _GCPRESOURCES_RESOURCE,
'__module__' : 'gcp_resources_pb2'
# @@protoc_insertion_point(class_scope:gcp_launcher.GcpResources.Resource)
})
,
'DESCRIPTOR' : _GCPRESOURCES,
'__module__' : 'gcp_resources_pb2'
# @@protoc_insertion_point(class_scope:gcp_launcher.GcpResources)
})
_sym_db.RegisterMessage(GcpResources)
_sym_db.RegisterMessage(GcpResources.Resource)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_GCPRESOURCES._serialized_start=63
_GCPRESOURCES._serialized_end=287
_GCPRESOURCES_RESOURCE._serialized_start=136
_GCPRESOURCES_RESOURCE._serialized_end=287
# @@protoc_insertion_point(module_scope)
| 802 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/proto/gcp_resources.proto | syntax = "proto3";
package gcp_launcher;
import "google/rpc/status.proto";
// The schema of the GCP resource. It will be used to parse the output parameter
// "gcp_resources"
message GcpResources {
// The metadata of a resource
message Resource {
// The type of the resource. E.g. DataflowJob
optional string resource_type = 1;
// The unique resource uri. E.g.
// https://dataflow.googleapis.com/v1b3/projects/project_1/locations/us-central1/jobs/123
optional string resource_uri = 2;
// The error from the resource.
google.rpc.Status error = 3;
// Optional. Used by component to save extra custom metadata for the resource.
repeated string labels = 4;
}
// A list of resources.
repeated Resource resources = 1;
}
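
// Illustrative sketch, not part of the schema: a component's "gcp_resources"
// output parameter carries this message rendered as JSON. The values below are
// hypothetical and reuse the example URI from the comments above; key casing
// follows proto3 JSON conventions:
//
//   {
//     "resources": [{
//       "resourceType": "DataflowJob",
//       "resourceUri": "https://dataflow.googleapis.com/v1b3/projects/project_1/locations/us-central1/jobs/123"
//     }]
//   }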
| 803 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/proto/task_error_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE
# Protobuf Python Version: 0.20240502.0
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x13task_error.proto\x12\ntask_error""\n\tTaskError\x12\x15\n\rerror_message\x18\x01'
b' \x01(\tB\x02P\x01\x62\x06proto3'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR,
'google_cloud_pipeline_components.google_cloud_pipeline_components.proto.task_error_pb2',
_globals,
)
if not _descriptor._USE_C_DESCRIPTORS:
_globals['DESCRIPTOR']._loaded_options = None
_globals['DESCRIPTOR']._serialized_options = b'P\001'
_globals['_TASKERROR']._serialized_start = 119
_globals['_TASKERROR']._serialized_end = 153
# @@protoc_insertion_point(module_scope)
| 804 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/__init__.py | # Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""V1 Google Cloud Pipeline Components.
These components correspond to the v1 Vertex AI API
(https://cloud.google.com/vertex-ai/docs/reference#versions).
"""
| 805 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/vertex_notification_email/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from google_cloud_pipeline_components import _image
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import PipelineTaskFinalStatus
@container_component
def vertex_pipelines_notification_email(
recipients: List[str],
pipeline_task_final_status: PipelineTaskFinalStatus,
):
# fmt: off
"""Send notification email(s) when an upstream task/DAG completes.
This component can only be used as an [ExitHandler](https://www.kubeflow.org/docs/components/pipelines/v2/pipelines/control-flow/#exit-handling-dslexithandler)'s exit task. Note that the [PipelineTaskFinalStatus](https://kubeflow-pipelines.readthedocs.io/en/latest/source/dsl.html#kfp.dsl.PipelineTaskFinalStatus) is provided automatically by Vertex Pipelines at runtime. You should not provide any input to this parameter when you instantiate this component as a task. This component works only on Vertex Pipelines. This component raises an exception when run on Kubeflow Pipelines. See a [usage example](https://cloud.google.com/vertex-ai/docs/pipelines/email-notifications).
Args:
recipients: A list of email addresses to send a notification to.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.vertex_notification_email.executor',
],
args=[
'--type',
'VertexNotificationEmail',
'--payload',
'',
],
)
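
# Illustrative usage sketch, not part of this module: the component is meant to
# be the exit task of a dsl.ExitHandler, as described in the docstring above.
# The pipeline name and recipient below are hypothetical.
#
#   from kfp import dsl
#   from google_cloud_pipeline_components.v1.vertex_notification_email import (
#       VertexNotificationEmailOp,
#   )
#
#   @dsl.pipeline(name='notification-email-example')
#   def my_pipeline():
#       notify_task = VertexNotificationEmailOp(recipients=['user@example.com'])
#       with dsl.ExitHandler(notify_task):
#           ...  # main pipeline tasks go here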
| 806 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/vertex_notification_email/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Email the completion status of a pipeline's sub-DAG."""
from google_cloud_pipeline_components.v1.vertex_notification_email.component import vertex_pipelines_notification_email as VertexNotificationEmailOp
__all__ = [
'VertexNotificationEmailOp',
]
| 807 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML components."""
| 808 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/training_configurator_and_validator.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Training Configurator and Validator component spec."""
from typing import Optional
from kfp import dsl
@dsl.container_component
def training_configurator_and_validator(
dataset_stats: dsl.Input[dsl.Artifact],
split_example_counts: str,
training_schema: dsl.Input[dsl.Artifact],
instance_schema: dsl.Input[dsl.Artifact],
metadata: dsl.Output[dsl.Artifact],
instance_baseline: dsl.Output[dsl.Artifact],
target_column: Optional[str] = '',
weight_column: Optional[str] = '',
prediction_type: Optional[str] = '',
optimization_objective: Optional[str] = '',
optimization_objective_recall_value: Optional[float] = -1,
optimization_objective_precision_value: Optional[float] = -1,
run_evaluation: Optional[bool] = False,
run_distill: Optional[bool] = False,
enable_probabilistic_inference: Optional[bool] = False,
time_series_identifier_column: Optional[str] = None,
time_series_identifier_columns: Optional[list] = [],
time_column: Optional[str] = '',
time_series_attribute_columns: Optional[list] = [],
available_at_forecast_columns: Optional[list] = [],
unavailable_at_forecast_columns: Optional[list] = [],
quantiles: Optional[list] = [],
context_window: Optional[int] = -1,
forecast_horizon: Optional[int] = -1,
forecasting_model_type: Optional[str] = '',
forecasting_transformations: Optional[dict] = {},
stage_1_deadline_hours: Optional[float] = None,
stage_2_deadline_hours: Optional[float] = None,
group_columns: Optional[list] = None,
group_total_weight: float = 0.0,
temporal_total_weight: float = 0.0,
group_temporal_total_weight: float = 0.0,
):
# fmt: off
"""Configures training and validates data and user-input configurations.
Args:
dataset_stats: Dataset stats generated by feature transform engine.
split_example_counts: JSON string of data split example counts for train, validate, and test splits.
training_schema: Schema of input data to the tf_model at training time.
instance_schema: Schema of input data to the tf_model at serving time.
target_column: Target column of input data.
weight_column: Weight column of input data.
prediction_type: Model prediction type. One of "classification", "regression", "time_series".
optimization_objective: Objective function the model is optimizing towards. The training process creates a model that maximizes/minimizes the value of the objective function over the validation set. The supported optimization objectives depend on the prediction type. If the field is not set, a default objective function is used. classification: "maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC) curve. "minimize-log-loss" - Minimize log loss. "maximize-au-prc" - Maximize the area under the precision-recall curve. "maximize-precision-at-recall" - Maximize precision for a specified recall value. "maximize-recall-at-precision" - Maximize recall for a specified precision value. classification (multi-class): "minimize-log-loss" (default) - Minimize log loss. regression: "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute error (MAE). "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
optimization_objective_recall_value: Required when optimization_objective is "maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
run_evaluation: Whether we are running evaluation in the training pipeline.
run_distill: Whether the distillation should be applied to the training.
enable_probabilistic_inference: If probabilistic inference is enabled, the model will fit a distribution that captures the uncertainty of a prediction. At inference time, the predictive distribution is used to make a point prediction that minimizes the optimization objective. For example, the mean of a predictive distribution is the point prediction that minimizes RMSE loss. If quantiles are specified, then the quantiles of the distribution are also returned.
time_series_identifier_column: [Deprecated] The time series identifier column. Used by forecasting only. Raises an exception if used - use the "time_series_identifier_columns" field instead.
time_series_identifier_columns: The list of time series identifier columns. Used by forecasting only.
time_column: The column that indicates the time. Used by forecasting only.
time_series_attribute_columns: The column names of the time series attributes.
available_at_forecast_columns: The names of the columns that are available at forecast time.
unavailable_at_forecast_columns: The names of the columns that are not available at forecast time.
quantiles: All quantiles that the model needs to predict.
context_window: The length of the context window.
forecast_horizon: The length of the forecast horizon.
forecasting_model_type: The model type, e.g. l2l, seq2seq, tft.
forecasting_transformations: Dict mapping auto and/or type-resolutions to feature columns. The supported types are auto, categorical, numeric, text, and timestamp.
stage_1_deadline_hours: Stage 1 training budget in hours.
stage_2_deadline_hours: Stage 2 training budget in hours.
group_columns: A list of time series attribute column names that define the time series hierarchy.
group_total_weight: The weight of the loss for predictions aggregated over time series in the same group.
temporal_total_weight: The weight of the loss for predictions aggregated over the horizon for a single time series.
group_temporal_total_weight: The weight of the loss for predictions aggregated over both the horizon and time series in the same hierarchy group.
Returns:
metadata: The tabular example gen metadata.
"""
# fmt: on
return dsl.ContainerSpec(
image='us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240808_0625',
command=[],
args=[
'training_configurator_and_validator',
dsl.ConcatPlaceholder(
items=['--instance_schema_path=', instance_schema.uri]
),
dsl.ConcatPlaceholder(
items=['--training_schema_path=', training_schema.uri]
),
dsl.ConcatPlaceholder(
items=['--dataset_stats_path=', dataset_stats.uri]
),
dsl.ConcatPlaceholder(
items=['--split_example_counts=', split_example_counts]
),
dsl.ConcatPlaceholder(items=['--target_column=', target_column]),
dsl.ConcatPlaceholder(items=['--weight_column=', weight_column]),
dsl.ConcatPlaceholder(items=['--prediction_type=', prediction_type]),
dsl.ConcatPlaceholder(
items=['--optimization_objective=', optimization_objective]
),
dsl.ConcatPlaceholder(
items=[
'--optimization_objective_recall_value=',
optimization_objective_recall_value,
]
),
dsl.ConcatPlaceholder(
items=[
'--optimization_objective_precision_value=',
optimization_objective_precision_value,
]
),
dsl.ConcatPlaceholder(items=['--metadata_path=', metadata.uri]),
dsl.ConcatPlaceholder(
items=['--instance_baseline_path=', instance_baseline.uri]
),
dsl.ConcatPlaceholder(items=['--run_evaluation=', run_evaluation]),
dsl.ConcatPlaceholder(items=['--run_distill=', run_distill]),
dsl.ConcatPlaceholder(
items=[
'--enable_probabilistic_inference=',
enable_probabilistic_inference,
]
),
dsl.IfPresentPlaceholder(
# Singular time series ID backwards support.
input_name='time_series_identifier_column',
then=dsl.ConcatPlaceholder(
items=[
'--time_series_identifier_column=',
time_series_identifier_column,
]
),
),
dsl.ConcatPlaceholder(
items=[
'--time_series_identifier_columns=',
time_series_identifier_columns,
]
),
dsl.ConcatPlaceholder(items=['--time_column=', time_column]),
dsl.ConcatPlaceholder(
items=[
'--time_series_attribute_columns=',
time_series_attribute_columns,
]
),
dsl.ConcatPlaceholder(
items=[
'--available_at_forecast_columns=',
available_at_forecast_columns,
]
),
dsl.ConcatPlaceholder(
items=[
'--unavailable_at_forecast_columns=',
unavailable_at_forecast_columns,
]
),
dsl.IfPresentPlaceholder(
input_name='quantiles',
then=dsl.ConcatPlaceholder(
items=[
'--quantiles=',
quantiles,
]
),
),
dsl.ConcatPlaceholder(items=['--context_window=', context_window]),
dsl.ConcatPlaceholder(
items=['--forecast_horizon=', forecast_horizon]
),
dsl.ConcatPlaceholder(
items=['--forecasting_model_type=', forecasting_model_type]
),
dsl.ConcatPlaceholder(
items=[
'--forecasting_transformations=',
forecasting_transformations,
]
),
dsl.IfPresentPlaceholder(
input_name='stage_1_deadline_hours',
then=dsl.ConcatPlaceholder(
items=[
'--stage_1_deadline_hours=',
stage_1_deadline_hours,
]
),
),
dsl.IfPresentPlaceholder(
input_name='stage_2_deadline_hours',
then=dsl.ConcatPlaceholder(
items=[
'--stage_2_deadline_hours=',
stage_2_deadline_hours,
]
),
),
dsl.IfPresentPlaceholder(
input_name='group_columns',
then=dsl.ConcatPlaceholder(
items=['--group_columns=', group_columns]
),
),
dsl.IfPresentPlaceholder(
input_name='group_total_weight',
then=dsl.ConcatPlaceholder(
items=['--group_total_weight=', group_total_weight]
),
),
dsl.IfPresentPlaceholder(
input_name='temporal_total_weight',
then=dsl.ConcatPlaceholder(
items=['--temporal_total_weight=', temporal_total_weight]
),
),
dsl.IfPresentPlaceholder(
input_name='group_temporal_total_weight',
then=dsl.ConcatPlaceholder(
items=[
'--group_temporal_total_weight=',
group_temporal_total_weight,
]
),
),
],
)
| 809 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/infra_validator.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Infra Validator component spec."""
from google_cloud_pipeline_components.types.artifact_types import UnmanagedContainerModel
from kfp import dsl
from kfp.dsl import Input
@dsl.container_component
def automl_tabular_infra_validator(
unmanaged_container_model: Input[UnmanagedContainerModel], # pylint: disable=unused-argument
):
# fmt: off
"""Validates the trained AutoML Tabular model is a valid model.
Args:
unmanaged_container_model: google.UnmanagedContainerModel for model to be validated.
"""
# fmt: on
return dsl.ContainerSpec(
image='us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240808_0625',
command=[],
args=['--executor_input', '{{$}}'],
)
| 810 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/finalizer.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Pipeline Finalizer component spec."""
from typing import Optional
from kfp import dsl
@dsl.container_component
def automl_tabular_finalizer(
project: str,
location: str,
root_dir: str,
gcp_resources: dsl.OutputPath(str),
encryption_spec_key_name: Optional[str] = '',
):
# fmt: off
"""Finalizes AutoML Tabular pipelines.
Args:
project: Project to run the pipeline finalizer.
location: Location for running the pipeline finalizer.
root_dir: The Cloud Storage location to store the output.
encryption_spec_key_name: Customer-managed encryption key.
Returns:
gcp_resources: GCP resources created by this component. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return dsl.ContainerSpec(
image='gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44',
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.custom_job.launcher',
],
args=[
'--type',
'CustomJob',
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--payload',
dsl.ConcatPlaceholder(
items=[
(
'{"display_name":'
f' "automl-tabular-finalizer-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
' "encryption_spec": {"kms_key_name":"'
),
encryption_spec_key_name,
(
'"}, "job_spec": {"worker_pool_specs": [{"replica_count":'
' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
' "container_spec": {"image_uri":"'
),
'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625',
'", "args": ["cancel_l2l_tuner", "--error_file_path=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/error.pb",'
' "--cleanup_lro_job_infos='
),
root_dir,
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/lro"' + ']}}]}}',
]
),
],
)
| 811 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/stage_1_tuner.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Tabular Stage 1 Tuner component spec."""
from typing import Optional
from kfp import dsl
from kfp.dsl import Artifact
from kfp.dsl import Input
from kfp.dsl import Output
@dsl.container_component
def automl_tabular_stage_1_tuner(
project: str,
location: str,
root_dir: str,
num_selected_trials: int,
deadline_hours: float,
num_parallel_trials: int,
single_run_max_secs: int,
metadata: Input[Artifact],
transform_output: Input[Artifact],
materialized_train_split: Input[Artifact],
materialized_eval_split: Input[Artifact],
gcp_resources: dsl.OutputPath(str),
tuning_result_output: Output[Artifact],
execution_metrics: dsl.OutputPath(dict),
study_spec_parameters_override: Optional[list] = [],
worker_pool_specs_override_json: Optional[list] = [],
reduce_search_space_mode: Optional[str] = 'regular',
num_selected_features: Optional[int] = 0,
disable_early_stopping: Optional[bool] = False,
feature_ranking: Optional[Input[Artifact]] = None,
tune_feature_selection_rate: Optional[bool] = False,
encryption_spec_key_name: Optional[str] = '',
run_distillation: Optional[bool] = False,
):
# fmt: off
"""Searches AutoML Tabular architectures and selects the top trials.
Args:
project: Project to run the Stage 1 tuner.
location: Location for running the Stage 1 tuner.
root_dir: The Cloud Storage location to store the output.
study_spec_parameters_override: JSON study spec. E.g., [{"parameter_id": "model_type","categorical_value_spec": {"values": ["nn"]}}]
worker_pool_specs_override_json: JSON worker pool specs. E.g., [{"machine_spec": {"machine_type": "n1-standard-16"}},{},{},{"machine_spec": {"machine_type": "n1-standard-16"}}]
reduce_search_space_mode: The reduce search space mode. Possible values: "regular" (default), "minimal", "full".
num_selected_trials: Number of selected trials. The number of weak learners in the final model is 5 * num_selected_trials.
num_selected_features: Number of selected features. The number of features to learn in the NN models.
deadline_hours: Number of hours the cross-validation trainer should run.
disable_early_stopping: True to disable early stopping. Default value is false.
num_parallel_trials: Number of parallel training trials.
single_run_max_secs: Max number of seconds each training trial runs.
metadata: The tabular example gen metadata.
transform_output: The transform output artifact.
materialized_train_split: The materialized train split.
materialized_eval_split: The materialized eval split.
encryption_spec_key_name: Customer-managed encryption key.
run_distillation: True if in distillation mode. The default value is false.
Returns:
gcp_resources: GCP resources created by this component. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
tuning_result_output: The trained model and architectures.
execution_metrics: Core metrics in dictionary of component execution.
"""
# fmt: on
return dsl.ContainerSpec(
image='gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44',
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.custom_job.launcher',
],
args=[
'--type',
'CustomJob',
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--payload',
dsl.ConcatPlaceholder(
items=[
(
'{"display_name":'
f' "automl-tabular-stage-1-tuner-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
' "encryption_spec": {"kms_key_name":"'
),
encryption_spec_key_name,
(
'"}, "job_spec": {"worker_pool_specs": [{"replica_count":'
' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
' "container_spec": {"image_uri":"'
),
'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625',
'", "args": ["l2l_stage_1_tuner", "--transform_output_path=',
transform_output.uri,
'", "--training_docker_uri=',
'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625',
'", "--feature_selection_result_path=',
feature_ranking.uri,
'", "--disable_early_stopping=',
disable_early_stopping,
'", "--tune_feature_selection_rate=',
tune_feature_selection_rate,
'", "--reduce_search_space_mode=',
reduce_search_space_mode,
(
f'", "--component_id={dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
' "--training_base_dir='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/train",'
' "--num_parallel_trial='
),
num_parallel_trials,
'", "--single_run_max_secs=',
single_run_max_secs,
'", "--deadline_hours=',
deadline_hours,
'", "--num_selected_trials=',
num_selected_trials,
'", "--num_selected_features=',
num_selected_features,
'", "--lro_job_info=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/lro",'
' "--error_file_path='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/error.pb",'
' "--metadata_path='
),
metadata.uri,
'", "--materialized_train_split=',
materialized_train_split.uri,
'", "--materialized_eval_split=',
materialized_eval_split.uri,
'", "--is_distill=',
run_distillation,
'", "--tuning_result_output_path=',
tuning_result_output.uri,
'", "--kms_key_name=',
encryption_spec_key_name,
'", "--gcp_resources_path=',
gcp_resources,
'", "--execution_metrics_path=',
execution_metrics,
(
'", "--use_json=true", "--log_level=ERROR",'
' "--executor_input={{$.json_escape[1]}}"]}}]}}'
),
]
),
],
)
| 812 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/split_materialized_data.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Split Materialized Data component spec."""
from kfp import dsl
from kfp.dsl import Artifact
from kfp.dsl import Dataset
from kfp.dsl import Input
from kfp.dsl import Output
@dsl.container_component
def split_materialized_data(
materialized_data: Input[Dataset],
materialized_train_split: Output[Artifact],
materialized_eval_split: Output[Artifact],
materialized_test_split: Output[Artifact],
):
# fmt: off
"""Splits materialized dataset into train, eval, and test data splits.
The materialized dataset generated by the Feature Transform Engine consists of
all the splits
that were combined into the input transform dataset (i.e., train, eval, and
test splits).
This component splits the output materialized dataset into corresponding
materialized data splits
so that the splits can be used by down-stream training or evaluation
components.
Args:
materialized_data: Materialized dataset output by the Feature
Transform Engine.
Returns:
materialized_train_split: Path pattern to materialized train split.
materialized_eval_split: Path pattern to materialized eval split.
materialized_test_split: Path pattern to materialized test split.
"""
# fmt: on
return dsl.ContainerSpec(
image='us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625',
command=[
'sh',
'-ec',
(
'program_path=$(mktemp -d)\nprintf "%s" "$0" >'
' "$program_path/ephemeral_component.py"\npython3 -m'
' kfp.components.executor_main '
' --component_module_path '
' "$program_path/ephemeral_component.py" '
' "$@"\n'
),
(
'\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom'
' typing import *\n\ndef _split_materialized_data(\n '
' materialized_data: Input[Dataset],\n '
" materialized_train_split: OutputPath('MaterializedSplit'),\n "
" materialized_eval_split: OutputPath('MaterializedSplit'),\n "
" materialized_test_split: OutputPath('MaterializedSplit')):\n "
' """Splits materialized_data into materialized_data test,'
' train, and eval splits.\n\n Necessary adapter between FTE'
' pipeline and trainer.\n\n Args:\n materialized_data:'
' materialized_data dataset output by FTE.\n '
' materialized_train_split: Path patern to'
' materialized_train_split.\n materialized_eval_split: Path'
' patern to materialized_eval_split.\n '
' materialized_test_split: Path patern to'
' materialized_test_split.\n """\n # pylint:'
' disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n'
' import json\n import tensorflow as tf\n # pylint:'
' enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\n'
" with tf.io.gfile.GFile(materialized_data.path, 'r') as f:\n "
' artifact_path = f.read()\n\n # needed to import tf because'
' this is a path in gs://\n with'
" tf.io.gfile.GFile(artifact_path, 'r') as f:\n "
' materialized_data_json = json.load(f)\n\n if'
" 'tf_record_data_source' in materialized_data_json:\n "
' file_patterns ='
" materialized_data_json['tf_record_data_source'][\n "
" 'file_patterns']\n elif 'avro_data_source' in"
' materialized_data_json:\n file_patterns ='
" materialized_data_json['avro_data_source'][\n "
" 'file_patterns']\n elif 'parquet_data_source' in"
' materialized_data_json:\n file_patterns ='
" materialized_data_json['parquet_data_source'][\n "
" 'file_patterns']\n else:\n raise ValueError(f'Unsupported"
" training data source: {materialized_data_json}')\n\n # we map"
' indices to file patterns based on the ordering of insertion'
' order\n # in our transform_data (see above in'
' _generate_analyze_and_transform_data)\n with'
" tf.io.gfile.GFile(materialized_train_split, 'w') as f:\n "
' f.write(file_patterns[0])\n\n with'
" tf.io.gfile.GFile(materialized_eval_split, 'w') as f:\n "
' f.write(file_patterns[1])\n\n with'
" tf.io.gfile.GFile(materialized_test_split, 'w') as f:\n "
' f.write(file_patterns[2])\n\n'
),
],
args=[
'--executor_input',
'{{$}}',
'--function_to_execute',
'_split_materialized_data',
],
)
| 813 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/cv_trainer.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Tabular Cross Validation Trainer component spec."""
from typing import Optional
from kfp import dsl
from kfp.dsl import Artifact
from kfp.dsl import Input
from kfp.dsl import Output
@dsl.container_component
def automl_tabular_cv_trainer(
project: str,
location: str,
root_dir: str,
deadline_hours: float,
num_parallel_trials: int,
single_run_max_secs: int,
num_selected_trials: int,
transform_output: Input[Artifact],
metadata: Input[Artifact],
materialized_cv_splits: Input[Artifact],
tuning_result_input: Input[Artifact],
gcp_resources: dsl.OutputPath(str),
tuning_result_output: Output[Artifact],
execution_metrics: dsl.OutputPath(dict),
worker_pool_specs_override_json: Optional[list] = [],
num_selected_features: Optional[int] = 0,
encryption_spec_key_name: Optional[str] = '',
):
# fmt: off
"""Tunes AutoML Tabular models and selects top trials using cross-validation.
Args:
project: Project to run Cross-validation trainer.
location: Location for running the Cross-validation trainer.
root_dir: The Cloud Storage location to store the output.
worker_pool_specs_override_json: JSON worker pool specs. E.g., [{"machine_spec": {"machine_type": "n1-standard-16"}},{},{},{"machine_spec": {"machine_type": "n1-standard-16"}}]
deadline_hours: Number of hours the cross-validation trainer should run.
num_parallel_trials: Number of parallel training trials.
single_run_max_secs: Max number of seconds each training trial runs.
num_selected_trials: Number of selected trials. The number of weak learners in the final model is 5 * num_selected_trials.
num_selected_features: Number of selected features. The number of features to learn in the NN models.
transform_output: The transform output artifact.
metadata: The tabular example gen metadata.
materialized_cv_splits: The materialized cross-validation splits.
tuning_result_input: AutoML Tabular tuning result.
encryption_spec_key_name: Customer-managed encryption key.
Returns:
tuning_result_output: The trained model and architectures.
gcp_resources: GCP resources created by this component. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
execution_metrics: Core metrics in dictionary of component execution.
"""
# fmt: on
return dsl.ContainerSpec(
image='gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44',
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.custom_job.launcher',
],
args=[
'--type',
'CustomJob',
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--payload',
dsl.ConcatPlaceholder(
items=[
(
'{"display_name":'
f' "automl-tabular-cv-tuner-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
' "encryption_spec": {"kms_key_name":"'
),
encryption_spec_key_name,
(
'"}, "job_spec": {"worker_pool_specs": [{"replica_count":'
' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
' "container_spec": {"image_uri":"'
),
'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625',
'", "args": ["l2l_cv_tuner", "--transform_output_path=',
transform_output.uri,
'", "--training_docker_uri=',
'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625',
(
f'", "--component_id={dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
' "--training_base_dir='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/train",'
' "--num_parallel_trial='
),
num_parallel_trials,
'", "--single_run_max_secs=',
single_run_max_secs,
'", "--deadline_hours=',
deadline_hours,
(
'", "--valid_trials_completed_threshold=0.7",'
' "--num_selected_trials='
),
num_selected_trials,
'", "--num_selected_features=',
num_selected_features,
'", "--lro_job_info=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/lro",'
' "--error_file_path='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/error.pb",'
' "--metadata_path='
),
metadata.uri,
'", "--materialized_cv_splits=',
materialized_cv_splits.uri,
'", "--tuning_result_input_path=',
tuning_result_input.uri,
'", "--tuning_result_output_path=',
tuning_result_output.uri,
'", "--kms_key_name=',
encryption_spec_key_name,
'", "--gcp_resources_path=',
gcp_resources,
'", "--execution_metrics_path=',
execution_metrics,
(
'", "--use_custom_job=true", "--use_json=true",'
' "--log_level=ERROR",'
' "--executor_input={{$.json_escape[1]}}"]}}]}}'
),
]
),
],
)
| 814 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GA AutoML tabular components."""
import os
from google_cloud_pipeline_components.v1.automl.tabular.cv_trainer import automl_tabular_cv_trainer as CvTrainerOp
from google_cloud_pipeline_components.v1.automl.tabular.ensemble import automl_tabular_ensemble as EnsembleOp
from google_cloud_pipeline_components.v1.automl.tabular.finalizer import automl_tabular_finalizer as FinalizerOp
from google_cloud_pipeline_components.v1.automl.tabular.infra_validator import automl_tabular_infra_validator as InfraValidatorOp
from google_cloud_pipeline_components.v1.automl.tabular.split_materialized_data import split_materialized_data as SplitMaterializedDataOp
from google_cloud_pipeline_components.v1.automl.tabular.stage_1_tuner import automl_tabular_stage_1_tuner as Stage1TunerOp
from google_cloud_pipeline_components.v1.automl.tabular.stats_and_example_gen import tabular_stats_and_example_gen as StatsAndExampleGenOp
from google_cloud_pipeline_components.v1.automl.tabular.training_configurator_and_validator import training_configurator_and_validator as TrainingConfiguratorAndValidatorOp
from google_cloud_pipeline_components.v1.automl.tabular.transform import automl_tabular_transform as TransformOp
from google_cloud_pipeline_components.v1.automl.tabular.utils import get_automl_tabular_pipeline_and_parameters
from kfp import components
__all__ = [
'CvTrainerOp',
'EnsembleOp',
'FinalizerOp',
'InfraValidatorOp',
'SplitMaterializedDataOp',
'Stage1TunerOp',
'StatsAndExampleGenOp',
'TrainingConfiguratorAndValidatorOp',
'TransformOp',
'get_automl_tabular_pipeline_and_parameters',
]
automl_tabular_pipeline = components.load_component_from_file(
# Note, please don't name it as `component.yaml` which will conflict with
# the generated file.
os.path.join(os.path.dirname(__file__), 'automl_tabular_pipeline.yaml')
)
| 815 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/automl_tabular_pipeline.yaml | # PIPELINE DEFINITION
# Name: automl-tabular
# Description: Complete AutoML Tables pipeline.
# Includes feature engineering, architecture search, and hyper-parameter tuning.
# Inputs:
# additional_experiments: dict
# cv_trainer_worker_pool_specs_override: list
# data_source_bigquery_table_path: str [Default: '']
# data_source_csv_filenames: str [Default: '']
# dataflow_service_account: str [Default: '']
# dataflow_subnetwork: str [Default: '']
# dataflow_use_public_ips: bool [Default: True]
# disable_early_stopping: bool [Default: False]
# distill_batch_predict_machine_type: str [Default: 'n1-standard-16']
# distill_batch_predict_max_replica_count: int [Default: 25.0]
# distill_batch_predict_starting_replica_count: int [Default: 25.0]
# enable_probabilistic_inference: bool [Default: False]
# encryption_spec_key_name: str [Default: '']
# evaluation_batch_explain_machine_type: str [Default: 'n1-highmem-8']
# evaluation_batch_explain_max_replica_count: int [Default: 10.0]
# evaluation_batch_explain_starting_replica_count: int [Default: 10.0]
# evaluation_batch_predict_machine_type: str [Default: 'n1-highmem-8']
# evaluation_batch_predict_max_replica_count: int [Default: 20.0]
# evaluation_batch_predict_starting_replica_count: int [Default: 20.0]
# evaluation_dataflow_disk_size_gb: int [Default: 50.0]
# evaluation_dataflow_machine_type: str [Default: 'n1-standard-4']
# evaluation_dataflow_max_num_workers: int [Default: 100.0]
# evaluation_dataflow_starting_num_workers: int [Default: 10.0]
# export_additional_model_without_custom_ops: bool [Default: False]
# fast_testing: bool [Default: False]
# location: str
# model_description: str [Default: '']
# model_display_name: str [Default: '']
# optimization_objective: str
# optimization_objective_precision_value: float [Default: -1.0]
# optimization_objective_recall_value: float [Default: -1.0]
# parent_model: system.Artifact
# predefined_split_key: str [Default: '']
# prediction_type: str
# project: str
# quantiles: list
# root_dir: str
# run_distillation: bool [Default: False]
# run_evaluation: bool [Default: False]
# stage_1_num_parallel_trials: int [Default: 35.0]
# stage_1_tuner_worker_pool_specs_override: list
# stage_1_tuning_result_artifact_uri: str [Default: '']
# stage_2_num_parallel_trials: int [Default: 35.0]
# stage_2_num_selected_trials: int [Default: 5.0]
# stats_and_example_gen_dataflow_disk_size_gb: int [Default: 40.0]
# stats_and_example_gen_dataflow_machine_type: str [Default: 'n1-standard-16']
# stats_and_example_gen_dataflow_max_num_workers: int [Default: 25.0]
# stratified_split_key: str [Default: '']
# study_spec_parameters_override: list
# target_column: str
# test_fraction: float [Default: -1.0]
# timestamp_split_key: str [Default: '']
# train_budget_milli_node_hours: float
# training_fraction: float [Default: -1.0]
# transform_dataflow_disk_size_gb: int [Default: 40.0]
# transform_dataflow_machine_type: str [Default: 'n1-standard-16']
# transform_dataflow_max_num_workers: int [Default: 25.0]
# transformations: str
# validation_fraction: float [Default: -1.0]
# vertex_dataset: system.Artifact
# weight_column: str [Default: '']
# Outputs:
# feature-attribution-2-feature_attributions: system.Metrics
# feature-attribution-3-feature_attributions: system.Metrics
# feature-attribution-feature_attributions: system.Metrics
# model-evaluation-2-evaluation_metrics: system.Metrics
# model-evaluation-3-evaluation_metrics: system.Metrics
# model-evaluation-evaluation_metrics: system.Metrics
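# Usage (sketch): a compiled PipelineSpec like this one is typically submitted
# with the google-cloud-aiplatform SDK; the project, bucket, and parameter
# values below are placeholders, not values taken from this file.
#
#   from google.cloud import aiplatform
#
#   aiplatform.init(project='my-project', location='us-central1')
#   job = aiplatform.PipelineJob(
#       display_name='automl-tabular',
#       template_path='automl_tabular_pipeline.yaml',
#       pipeline_root='gs://my-bucket/pipeline_root',
#       parameter_values={
#           'project': 'my-project',
#           'location': 'us-central1',
#           'root_dir': 'gs://my-bucket/pipeline_root',
#           # ... remaining required inputs (target_column, prediction_type,
#           # optimization_objective, transformations, ...)
#       },
#   )
#   job.run()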
components:
comp-automl-tabular-cv-trainer:
executorLabel: exec-automl-tabular-cv-trainer
inputDefinitions:
artifacts:
materialized_cv_splits:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The materialized cross-validation splits.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
tuning_result_input:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: AutoML Tabular tuning result.
parameters:
deadline_hours:
description: Number of hours the cross-validation trainer should run.
parameterType: NUMBER_DOUBLE
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
location:
description: Location for running the Cross-validation trainer.
parameterType: STRING
num_parallel_trials:
description: Number of parallel training trials.
parameterType: NUMBER_INTEGER
num_selected_features:
defaultValue: 0.0
description: Number of selected features. The number of features to learn
in the NN models.
isOptional: true
parameterType: NUMBER_INTEGER
num_selected_trials:
description: Number of selected trials. The number of weak learners in the
final model is 5 * num_selected_trials.
parameterType: NUMBER_INTEGER
project:
description: Project to run Cross-validation trainer.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
single_run_max_secs:
description: Max number of seconds each training trial runs.
parameterType: NUMBER_INTEGER
worker_pool_specs_override_json:
defaultValue: []
description: 'JSON worker pool specs. E.g., [{"machine_spec": {"machine_type":
"n1-standard-16"}},{},{},{"machine_spec": {"machine_type": "n1-standard-16"}}]'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
tuning_result_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The trained model and architectures.
parameters:
execution_metrics:
          description: Core metrics of the component execution, as a dictionary.
parameterType: STRUCT
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-cv-trainer-2:
executorLabel: exec-automl-tabular-cv-trainer-2
inputDefinitions:
artifacts:
materialized_cv_splits:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The materialized cross-validation splits.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
tuning_result_input:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: AutoML Tabular tuning result.
parameters:
deadline_hours:
description: Number of hours the cross-validation trainer should run.
parameterType: NUMBER_DOUBLE
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
location:
description: Location for running the Cross-validation trainer.
parameterType: STRING
num_parallel_trials:
description: Number of parallel training trials.
parameterType: NUMBER_INTEGER
num_selected_features:
defaultValue: 0.0
description: Number of selected features. The number of features to learn
in the NN models.
isOptional: true
parameterType: NUMBER_INTEGER
num_selected_trials:
description: Number of selected trials. The number of weak learners in the
final model is 5 * num_selected_trials.
parameterType: NUMBER_INTEGER
project:
description: Project to run Cross-validation trainer.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
single_run_max_secs:
description: Max number of seconds each training trial runs.
parameterType: NUMBER_INTEGER
worker_pool_specs_override_json:
defaultValue: []
description: 'JSON worker pool specs. E.g., [{"machine_spec": {"machine_type":
"n1-standard-16"}},{},{},{"machine_spec": {"machine_type": "n1-standard-16"}}]'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
tuning_result_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The trained model and architectures.
parameters:
execution_metrics:
          description: Core metrics of the component execution, as a dictionary.
parameterType: STRUCT
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-ensemble:
executorLabel: exec-automl-tabular-ensemble
inputDefinitions:
artifacts:
dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The schema of the dataset.
instance_baseline:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The instance baseline used to calculate explanations.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
tuning_result_input:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: AutoML Tabular tuning result.
warmup_data:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
          description: The warm-up data. The ensemble component saves the warm-up
            data together with the model artifact and uses it to warm up the model
            when the prediction server starts.
isOptional: true
parameters:
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
export_additional_model_without_custom_ops:
defaultValue: false
          description: If true, export an additional model without custom TF operators
            to the `model_without_custom_ops` output.
isOptional: true
parameterType: BOOLEAN
location:
          description: Location for running the ensemble.
parameterType: STRING
project:
          description: Project to run the ensemble.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
outputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The output model.
model_architecture:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The architecture of the output model.
model_without_custom_ops:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
          description: The output model without custom TF operators; this output
            will be empty unless `export_additional_model_without_custom_ops` is set.
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
parameters:
explanation_metadata:
          description: The explanation metadata used by Vertex online and batch
            explanations.
parameterType: STRUCT
explanation_parameters:
parameterType: STRUCT
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-ensemble-2:
executorLabel: exec-automl-tabular-ensemble-2
inputDefinitions:
artifacts:
dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The schema of the dataset.
instance_baseline:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The instance baseline used to calculate explanations.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
tuning_result_input:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: AutoML Tabular tuning result.
warmup_data:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
          description: The warm-up data. The ensemble component saves the warm-up
            data together with the model artifact and uses it to warm up the model
            when the prediction server starts.
isOptional: true
parameters:
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
export_additional_model_without_custom_ops:
defaultValue: false
          description: If true, export an additional model without custom TF operators
            to the `model_without_custom_ops` output.
isOptional: true
parameterType: BOOLEAN
location:
          description: Location for running the ensemble.
parameterType: STRING
project:
          description: Project to run the ensemble.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
outputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The output model.
model_architecture:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The architecture of the output model.
model_without_custom_ops:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
          description: The output model without custom TF operators; this output
            will be empty unless `export_additional_model_without_custom_ops` is set.
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
parameters:
explanation_metadata:
          description: The explanation metadata used by Vertex online and batch
            explanations.
parameterType: STRUCT
explanation_parameters:
parameterType: STRUCT
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-ensemble-3:
executorLabel: exec-automl-tabular-ensemble-3
inputDefinitions:
artifacts:
dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The schema of the dataset.
instance_baseline:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The instance baseline used to calculate explanations.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
tuning_result_input:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: AutoML Tabular tuning result.
warmup_data:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
          description: The warm-up data. The ensemble component saves the warm-up
            data together with the model artifact and uses it to warm up the model
            when the prediction server starts.
isOptional: true
parameters:
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
export_additional_model_without_custom_ops:
defaultValue: false
          description: If true, export an additional model without custom TF operators
            to the `model_without_custom_ops` output.
isOptional: true
parameterType: BOOLEAN
location:
          description: Location for running the ensemble.
parameterType: STRING
project:
          description: Project to run the ensemble.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
outputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The output model.
model_architecture:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The architecture of the output model.
model_without_custom_ops:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
          description: The output model without custom TF operators; this output
            will be empty unless `export_additional_model_without_custom_ops` is set.
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
parameters:
explanation_metadata:
          description: The explanation metadata used by Vertex online and batch
            explanations.
parameterType: STRUCT
explanation_parameters:
parameterType: STRUCT
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-finalizer:
executorLabel: exec-automl-tabular-finalizer
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
location:
          description: Location for running the finalizer.
parameterType: STRING
project:
          description: Project to run the finalizer.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
outputDefinitions:
parameters:
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-infra-validator:
executorLabel: exec-automl-tabular-infra-validator
inputDefinitions:
artifacts:
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: google.UnmanagedContainerModel for model to be validated.
comp-automl-tabular-infra-validator-2:
executorLabel: exec-automl-tabular-infra-validator-2
inputDefinitions:
artifacts:
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: google.UnmanagedContainerModel for model to be validated.
comp-automl-tabular-infra-validator-3:
executorLabel: exec-automl-tabular-infra-validator-3
inputDefinitions:
artifacts:
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: google.UnmanagedContainerModel for model to be validated.
comp-automl-tabular-stage-1-tuner:
executorLabel: exec-automl-tabular-stage-1-tuner
inputDefinitions:
artifacts:
feature_ranking:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
materialized_eval_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The materialized eval split.
materialized_train_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The materialized train split.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
parameters:
deadline_hours:
          description: Number of hours the stage 1 tuner should run.
parameterType: NUMBER_DOUBLE
disable_early_stopping:
defaultValue: false
          description: If true, disable early stopping. Defaults to false.
isOptional: true
parameterType: BOOLEAN
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
location:
          description: Location for running the stage 1 tuner.
parameterType: STRING
num_parallel_trials:
description: Number of parallel training trials.
parameterType: NUMBER_INTEGER
num_selected_features:
defaultValue: 0.0
description: Number of selected features. The number of features to learn
in the NN models.
isOptional: true
parameterType: NUMBER_INTEGER
num_selected_trials:
description: Number of selected trials. The number of weak learners in the
final model is 5 * num_selected_trials.
parameterType: NUMBER_INTEGER
project:
          description: Project to run the stage 1 tuner.
parameterType: STRING
reduce_search_space_mode:
defaultValue: regular
description: 'The reduce search space mode. Possible values: "regular" (default),
"minimal", "full".'
isOptional: true
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
run_distillation:
defaultValue: false
          description: If true, run in distillation mode. Defaults to false.
isOptional: true
parameterType: BOOLEAN
single_run_max_secs:
description: Max number of seconds each training trial runs.
parameterType: NUMBER_INTEGER
study_spec_parameters_override:
defaultValue: []
description: 'JSON study spec. E.g., [{"parameter_id": "model_type","categorical_value_spec":
{"values": ["nn"]}}]'
isOptional: true
parameterType: LIST
tune_feature_selection_rate:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
worker_pool_specs_override_json:
defaultValue: []
description: 'JSON worker pool specs. E.g., [{"machine_spec": {"machine_type":
"n1-standard-16"}},{},{},{"machine_spec": {"machine_type": "n1-standard-16"}}]'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
tuning_result_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The trained model and architectures.
parameters:
execution_metrics:
          description: Core metrics of the component execution, as a dictionary.
parameterType: STRUCT
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-stage-1-tuner-2:
executorLabel: exec-automl-tabular-stage-1-tuner-2
inputDefinitions:
artifacts:
feature_ranking:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
materialized_eval_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The materialized eval split.
materialized_train_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The materialized train split.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
parameters:
deadline_hours:
          description: Number of hours the stage 1 tuner should run.
parameterType: NUMBER_DOUBLE
disable_early_stopping:
defaultValue: false
          description: If true, disable early stopping. Defaults to false.
isOptional: true
parameterType: BOOLEAN
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
location:
          description: Location for running the stage 1 tuner.
parameterType: STRING
num_parallel_trials:
description: Number of parallel training trials.
parameterType: NUMBER_INTEGER
num_selected_features:
defaultValue: 0.0
description: Number of selected features. The number of features to learn
in the NN models.
isOptional: true
parameterType: NUMBER_INTEGER
num_selected_trials:
description: Number of selected trials. The number of weak learners in the
final model is 5 * num_selected_trials.
parameterType: NUMBER_INTEGER
project:
          description: Project to run the stage 1 tuner.
parameterType: STRING
reduce_search_space_mode:
defaultValue: regular
description: 'The reduce search space mode. Possible values: "regular" (default),
"minimal", "full".'
isOptional: true
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
run_distillation:
defaultValue: false
          description: If true, run in distillation mode. Defaults to false.
isOptional: true
parameterType: BOOLEAN
single_run_max_secs:
description: Max number of seconds each training trial runs.
parameterType: NUMBER_INTEGER
study_spec_parameters_override:
defaultValue: []
description: 'JSON study spec. E.g., [{"parameter_id": "model_type","categorical_value_spec":
{"values": ["nn"]}}]'
isOptional: true
parameterType: LIST
tune_feature_selection_rate:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
worker_pool_specs_override_json:
defaultValue: []
description: 'JSON worker pool specs. E.g., [{"machine_spec": {"machine_type":
"n1-standard-16"}},{},{},{"machine_spec": {"machine_type": "n1-standard-16"}}]'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
tuning_result_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The trained model and architectures.
parameters:
execution_metrics:
          description: Core metrics of the component execution, as a dictionary.
parameterType: STRUCT
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-transform:
executorLabel: exec-automl-tabular-transform
inputDefinitions:
artifacts:
dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The schema of the dataset.
eval_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The eval split.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
test_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The test split.
train_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The train split.
parameters:
dataflow_disk_size_gb:
defaultValue: 40.0
          description: The disk size, in gigabytes, to use on each Dataflow worker
            instance. If not set, defaults to 40.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-16
          description: The machine type used for dataflow jobs. If not set, defaults
            to n1-standard-16.
isOptional: true
parameterType: STRING
dataflow_max_num_workers:
defaultValue: 25.0
          description: The maximum number of workers to run the dataflow job. If
            not set, defaults to 25.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
description: Custom service account to run dataflow jobs.
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
          description: 'Dataflow''s fully qualified subnetwork name; when empty,
            the default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications'
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: Specifies whether Dataflow workers use public IP addresses.
isOptional: true
parameterType: BOOLEAN
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
location:
          description: Location for running the transform job.
parameterType: STRING
project:
          description: Project to run the transform job.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
outputDefinitions:
artifacts:
materialized_eval_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
          description: The materialized eval split.
        materialized_test_split:
          artifactType:
            schemaTitle: system.Artifact
            schemaVersion: 0.0.1
          description: The materialized test split.
materialized_train_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The materialized train split.
training_schema_uri:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The training schema.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
parameters:
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-automl-tabular-transform-2:
executorLabel: exec-automl-tabular-transform-2
inputDefinitions:
artifacts:
dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The schema of the dataset.
eval_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The eval split.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
test_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The test split.
train_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The train split.
parameters:
dataflow_disk_size_gb:
defaultValue: 40.0
          description: The disk size, in gigabytes, to use on each Dataflow worker
            instance. If not set, defaults to 40.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-16
          description: The machine type used for dataflow jobs. If not set, defaults
            to n1-standard-16.
isOptional: true
parameterType: STRING
dataflow_max_num_workers:
defaultValue: 25.0
          description: The maximum number of workers to run the dataflow job. If
            not set, defaults to 25.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
description: Custom service account to run dataflow jobs.
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
          description: 'Dataflow''s fully qualified subnetwork name; when empty,
            the default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications'
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: Specifies whether Dataflow workers use public IP addresses.
isOptional: true
parameterType: BOOLEAN
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
location:
          description: Location for running the transform job.
parameterType: STRING
project:
          description: Project to run the transform job.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
outputDefinitions:
artifacts:
materialized_eval_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
          description: The materialized eval split.
        materialized_test_split:
          artifactType:
            schemaTitle: system.Artifact
            schemaVersion: 0.0.1
          description: The materialized test split.
materialized_train_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The materialized train split.
training_schema_uri:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The training schema.
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
parameters:
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
comp-bool-identity:
executorLabel: exec-bool-identity
inputDefinitions:
parameters:
value:
description: Boolean value to return
parameterType: BOOLEAN
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-bool-identity-2:
executorLabel: exec-bool-identity-2
inputDefinitions:
parameters:
value:
description: Boolean value to return
parameterType: BOOLEAN
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-bool-identity-3:
executorLabel: exec-bool-identity-3
inputDefinitions:
parameters:
value:
description: Boolean value to return
parameterType: BOOLEAN
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-calculate-training-parameters:
executorLabel: exec-calculate-training-parameters
inputDefinitions:
parameters:
fast_testing:
defaultValue: false
description: Internal flag used for presubmit tests.
isOptional: true
parameterType: BOOLEAN
is_skip_architecture_search:
defaultValue: false
          description: 'Whether the component is being called in the
            skip_architecture_search pipeline.'
isOptional: true
parameterType: BOOLEAN
run_distillation:
          description: Whether to run distillation in the training pipeline.
parameterType: BOOLEAN
stage_1_num_parallel_trials:
          description: Number of parallel trials for stage 1.
          parameterType: NUMBER_INTEGER
        stage_2_num_parallel_trials:
          description: Number of parallel trials for stage 2.
parameterType: NUMBER_INTEGER
train_budget_milli_node_hours:
          description: 'The train budget for creating this model,
            expressed in milli node hours, i.e. a value of 1,000 in this field means
            1 node hour.'
parameterType: NUMBER_DOUBLE
outputDefinitions:
parameters:
distill_stage_1_deadline_hours:
parameterType: NUMBER_DOUBLE
reduce_search_space_mode:
parameterType: STRING
stage_1_deadline_hours:
parameterType: NUMBER_DOUBLE
stage_1_num_selected_trials:
parameterType: NUMBER_INTEGER
stage_1_single_run_max_secs:
parameterType: NUMBER_INTEGER
stage_2_deadline_hours:
parameterType: NUMBER_DOUBLE
stage_2_single_run_max_secs:
parameterType: NUMBER_INTEGER
comp-calculate-training-parameters-2:
executorLabel: exec-calculate-training-parameters-2
inputDefinitions:
parameters:
fast_testing:
defaultValue: false
description: Internal flag used for presubmit tests.
isOptional: true
parameterType: BOOLEAN
is_skip_architecture_search:
defaultValue: false
          description: 'Whether the component is being called in the
            skip_architecture_search pipeline.'
isOptional: true
parameterType: BOOLEAN
run_distillation:
          description: Whether to run distillation in the training pipeline.
parameterType: BOOLEAN
stage_1_num_parallel_trials:
          description: Number of parallel trials for stage 1.
          parameterType: NUMBER_INTEGER
        stage_2_num_parallel_trials:
          description: Number of parallel trials for stage 2.
parameterType: NUMBER_INTEGER
train_budget_milli_node_hours:
          description: 'The train budget for creating this model,
            expressed in milli node hours, i.e. a value of 1,000 in this field means
            1 node hour.'
parameterType: NUMBER_DOUBLE
outputDefinitions:
parameters:
distill_stage_1_deadline_hours:
parameterType: NUMBER_DOUBLE
reduce_search_space_mode:
parameterType: STRING
stage_1_deadline_hours:
parameterType: NUMBER_DOUBLE
stage_1_num_selected_trials:
parameterType: NUMBER_INTEGER
stage_1_single_run_max_secs:
parameterType: NUMBER_INTEGER
stage_2_deadline_hours:
parameterType: NUMBER_DOUBLE
stage_2_single_run_max_secs:
parameterType: NUMBER_INTEGER
comp-condition-2:
dag:
outputs:
artifacts:
feature-attribution-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-feature_attributions
producerSubtask: condition-3
model-evaluation-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-evaluation_metrics
producerSubtask: condition-3
tasks:
automl-tabular-cv-trainer:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-cv-trainer
dependentTasks:
- calculate-training-parameters
- importer
inputs:
artifacts:
materialized_cv_splits:
componentInputArtifact: pipelinechannel--merge-materialized-splits-splits
metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
transform_output:
componentInputArtifact: pipelinechannel--automl-tabular-transform-transform_output
tuning_result_input:
taskOutputArtifact:
outputArtifactKey: artifact
producerTask: importer
parameters:
deadline_hours:
taskOutputParameter:
outputParameterKey: stage_2_deadline_hours
producerTask: calculate-training-parameters
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--location
num_parallel_trials:
componentInputParameter: pipelinechannel--stage_2_num_parallel_trials
num_selected_trials:
componentInputParameter: pipelinechannel--stage_2_num_selected_trials
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
single_run_max_secs:
taskOutputParameter:
outputParameterKey: stage_2_single_run_max_secs
producerTask: calculate-training-parameters
worker_pool_specs_override_json:
componentInputParameter: pipelinechannel--cv_trainer_worker_pool_specs_override
taskInfo:
name: automl-tabular-cv-trainer
automl-tabular-ensemble:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-ensemble
dependentTasks:
- automl-tabular-cv-trainer
inputs:
artifacts:
dataset_schema:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-dataset_schema
instance_baseline:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-instance_baseline
metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
transform_output:
componentInputArtifact: pipelinechannel--automl-tabular-transform-transform_output
tuning_result_input:
taskOutputArtifact:
outputArtifactKey: tuning_result_output
producerTask: automl-tabular-cv-trainer
warmup_data:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-eval_split
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
export_additional_model_without_custom_ops:
componentInputParameter: pipelinechannel--export_additional_model_without_custom_ops
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
taskInfo:
name: automl-tabular-ensemble
automl-tabular-infra-validator:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-infra-validator
dependentTasks:
- automl-tabular-ensemble
inputs:
artifacts:
unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble
taskInfo:
name: automl-tabular-infra-validator
bool-identity:
cachingOptions:
enableCache: true
componentRef:
name: comp-bool-identity
inputs:
parameters:
value:
componentInputParameter: pipelinechannel--run_evaluation
taskInfo:
name: bool-identity
calculate-training-parameters:
cachingOptions:
enableCache: true
componentRef:
name: comp-calculate-training-parameters
inputs:
parameters:
fast_testing:
componentInputParameter: pipelinechannel--fast_testing
is_skip_architecture_search:
runtimeValue:
constant: true
run_distillation:
componentInputParameter: pipelinechannel--run_distillation
stage_1_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_1_num_parallel_trials
stage_2_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_2_num_parallel_trials
train_budget_milli_node_hours:
componentInputParameter: pipelinechannel--train_budget_milli_node_hours
taskInfo:
name: calculate-training-parameters
condition-3:
componentRef:
name: comp-condition-3
dependentTasks:
- automl-tabular-ensemble
- bool-identity
- model-upload
inputs:
artifacts:
pipelinechannel--automl-tabular-ensemble-explanation_metadata_artifact:
taskOutputArtifact:
outputArtifactKey: explanation_metadata_artifact
producerTask: automl-tabular-ensemble
pipelinechannel--automl-tabular-ensemble-unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble
pipelinechannel--model-upload-model:
taskOutputArtifact:
outputArtifactKey: model
producerTask: model-upload
parameters:
pipelinechannel--automl-tabular-ensemble-explanation_parameters:
taskOutputParameter:
outputParameterKey: explanation_parameters
producerTask: automl-tabular-ensemble
pipelinechannel--bool-identity-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: bool-identity
pipelinechannel--dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--evaluation_batch_explain_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
pipelinechannel--evaluation_batch_explain_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
pipelinechannel--evaluation_batch_explain_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
pipelinechannel--evaluation_batch_predict_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
pipelinechannel--evaluation_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
pipelinechannel--evaluation_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
pipelinechannel--evaluation_dataflow_starting_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
pipelinechannel--location:
componentInputParameter: pipelinechannel--location
pipelinechannel--prediction_type:
componentInputParameter: pipelinechannel--prediction_type
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--root_dir:
componentInputParameter: pipelinechannel--root_dir
pipelinechannel--string-not-empty-Output:
componentInputParameter: pipelinechannel--string-not-empty-Output
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
taskInfo:
name: is-evaluation
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--bool-identity-Output']
== 'true'
importer:
cachingOptions:
enableCache: true
componentRef:
name: comp-importer
inputs:
parameters:
uri:
componentInputParameter: pipelinechannel--stage_1_tuning_result_artifact_uri
taskInfo:
name: importer
model-upload:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-upload
dependentTasks:
- automl-tabular-ensemble
inputs:
artifacts:
explanation_metadata_artifact:
taskOutputArtifact:
outputArtifactKey: explanation_metadata_artifact
producerTask: automl-tabular-ensemble
parent_model:
componentInputArtifact: pipelinechannel--parent_model
unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble
parameters:
description:
componentInputParameter: pipelinechannel--model_description
display_name:
componentInputParameter: pipelinechannel--get-model-display-name-model_display_name
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
explanation_parameters:
taskOutputParameter:
outputParameterKey: explanation_parameters
producerTask: automl-tabular-ensemble
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: model-upload
inputDefinitions:
artifacts:
pipelinechannel--automl-tabular-transform-transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--merge-materialized-splits-splits:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--parent_model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-eval_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-instance_baseline:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
pipelinechannel--cv_trainer_worker_pool_specs_override:
parameterType: LIST
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_explain_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_starting_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--export_additional_model_without_custom_ops:
parameterType: BOOLEAN
pipelinechannel--fast_testing:
parameterType: BOOLEAN
pipelinechannel--get-model-display-name-model_display_name:
parameterType: STRING
pipelinechannel--location:
parameterType: STRING
pipelinechannel--model_description:
parameterType: STRING
pipelinechannel--prediction_type:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--run_distillation:
parameterType: BOOLEAN
pipelinechannel--run_evaluation:
parameterType: BOOLEAN
pipelinechannel--stage_1_num_parallel_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--stage_1_tuning_result_artifact_uri:
parameterType: STRING
pipelinechannel--stage_2_num_parallel_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--stage_2_num_selected_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--string-not-empty-Output:
parameterType: STRING
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
parameterType: LIST
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
parameterType: LIST
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--train_budget_milli_node_hours:
parameterType: NUMBER_DOUBLE
outputDefinitions:
artifacts:
feature-attribution-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
comp-condition-3:
dag:
outputs:
artifacts:
feature-attribution-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature_attributions
producerSubtask: feature-attribution
model-evaluation-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: evaluation_metrics
producerSubtask: model-evaluation
tasks:
feature-attribution:
cachingOptions:
enableCache: true
componentRef:
name: comp-feature-attribution
dependentTasks:
- model-batch-explanation
inputs:
artifacts:
predictions_gcs_source:
taskOutputArtifact:
outputArtifactKey: gcs_output_directory
producerTask: model-batch-explanation
parameters:
dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
dataflow_max_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
dataflow_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
force_runner_mode:
runtimeValue:
constant: Dataflow
location:
componentInputParameter: pipelinechannel--location
predictions_format:
runtimeValue:
constant: jsonl
problem_type:
componentInputParameter: pipelinechannel--prediction_type
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: feature-attribution
model-batch-explanation:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-explanation
inputs:
artifacts:
explanation_metadata_artifact:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-explanation_metadata_artifact
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-unmanaged_container_model
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
explanation_parameters:
componentInputParameter: pipelinechannel--automl-tabular-ensemble-explanation_parameters
gcs_destination_output_uri_prefix:
componentInputParameter: pipelinechannel--root_dir
gcs_source_uris:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json
generate_explanation:
runtimeValue:
constant: true
instances_format:
runtimeValue:
constant: tf-record
job_display_name:
runtimeValue:
constant: batch-explain-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
predictions_format:
runtimeValue:
constant: jsonl
project:
componentInputParameter: pipelinechannel--project
starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
taskInfo:
name: model-batch-explanation
model-batch-predict:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-predict
inputs:
artifacts:
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-unmanaged_container_model
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
gcs_destination_output_uri_prefix:
componentInputParameter: pipelinechannel--root_dir
gcs_source_uris:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
instances_format:
runtimeValue:
constant: tf-record
job_display_name:
runtimeValue:
constant: batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
predictions_format:
runtimeValue:
constant: jsonl
project:
componentInputParameter: pipelinechannel--project
starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
taskInfo:
name: model-batch-predict
model-evaluation:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-evaluation
dependentTasks:
- model-batch-predict
inputs:
artifacts:
batch_prediction_job:
taskOutputArtifact:
outputArtifactKey: batchpredictionjob
producerTask: model-batch-predict
parameters:
dataflow_disk_size:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
dataflow_max_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
dataflow_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
ground_truth_column:
componentInputParameter: pipelinechannel--target_column
ground_truth_format:
runtimeValue:
constant: jsonl
location:
componentInputParameter: pipelinechannel--location
prediction_label_column:
runtimeValue:
constant: ''
prediction_score_column:
runtimeValue:
constant: ''
predictions_format:
runtimeValue:
constant: jsonl
problem_type:
componentInputParameter: pipelinechannel--prediction_type
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
taskInfo:
name: model-evaluation
model-evaluation-import:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-evaluation-import
dependentTasks:
- feature-attribution
- model-evaluation
inputs:
artifacts:
feature_attributions:
taskOutputArtifact:
outputArtifactKey: feature_attributions
producerTask: feature-attribution
metrics:
taskOutputArtifact:
outputArtifactKey: evaluation_metrics
producerTask: model-evaluation
model:
componentInputArtifact: pipelinechannel--model-upload-model
parameters:
dataset_paths:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
dataset_type:
runtimeValue:
constant: tf-record
display_name:
runtimeValue:
constant: AutoML Tabular
problem_type:
componentInputParameter: pipelinechannel--prediction_type
taskInfo:
name: model-evaluation-import
inputDefinitions:
artifacts:
pipelinechannel--automl-tabular-ensemble-explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--automl-tabular-ensemble-unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
pipelinechannel--model-upload-model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
parameters:
pipelinechannel--automl-tabular-ensemble-explanation_parameters:
parameterType: STRUCT
pipelinechannel--bool-identity-Output:
parameterType: STRING
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_explain_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_starting_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--location:
parameterType: STRING
pipelinechannel--prediction_type:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--string-not-empty-Output:
parameterType: STRING
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
parameterType: LIST
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
parameterType: LIST
pipelinechannel--target_column:
parameterType: STRING
outputDefinitions:
artifacts:
feature-attribution-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
comp-condition-4:
dag:
outputs:
artifacts:
feature-attribution-2-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-2-feature_attributions
producerSubtask: condition-5
feature-attribution-3-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-3-feature_attributions
producerSubtask: condition-7
model-evaluation-2-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-2-evaluation_metrics
producerSubtask: condition-5
model-evaluation-3-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-3-evaluation_metrics
producerSubtask: condition-7
tasks:
automl-tabular-cv-trainer-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-cv-trainer-2
dependentTasks:
- automl-tabular-stage-1-tuner
- calculate-training-parameters-2
inputs:
artifacts:
materialized_cv_splits:
componentInputArtifact: pipelinechannel--merge-materialized-splits-splits
metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
transform_output:
componentInputArtifact: pipelinechannel--automl-tabular-transform-transform_output
tuning_result_input:
taskOutputArtifact:
outputArtifactKey: tuning_result_output
producerTask: automl-tabular-stage-1-tuner
parameters:
deadline_hours:
taskOutputParameter:
outputParameterKey: stage_2_deadline_hours
producerTask: calculate-training-parameters-2
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--location
num_parallel_trials:
componentInputParameter: pipelinechannel--stage_2_num_parallel_trials
num_selected_trials:
componentInputParameter: pipelinechannel--stage_2_num_selected_trials
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
single_run_max_secs:
taskOutputParameter:
outputParameterKey: stage_2_single_run_max_secs
producerTask: calculate-training-parameters-2
worker_pool_specs_override_json:
componentInputParameter: pipelinechannel--cv_trainer_worker_pool_specs_override
taskInfo:
name: automl-tabular-cv-trainer-2
automl-tabular-ensemble-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-ensemble-2
dependentTasks:
- automl-tabular-cv-trainer-2
inputs:
artifacts:
dataset_schema:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-dataset_schema
instance_baseline:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-instance_baseline
metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
transform_output:
componentInputArtifact: pipelinechannel--automl-tabular-transform-transform_output
tuning_result_input:
taskOutputArtifact:
outputArtifactKey: tuning_result_output
producerTask: automl-tabular-cv-trainer-2
warmup_data:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-eval_split
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
export_additional_model_without_custom_ops:
componentInputParameter: pipelinechannel--export_additional_model_without_custom_ops
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
taskInfo:
name: automl-tabular-ensemble-2
automl-tabular-infra-validator-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-infra-validator-2
dependentTasks:
- automl-tabular-ensemble-2
inputs:
artifacts:
unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble-2
taskInfo:
name: automl-tabular-infra-validator-2
automl-tabular-stage-1-tuner:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-stage-1-tuner
dependentTasks:
- calculate-training-parameters-2
inputs:
artifacts:
materialized_eval_split:
componentInputArtifact: pipelinechannel--automl-tabular-transform-materialized_eval_split
materialized_train_split:
componentInputArtifact: pipelinechannel--automl-tabular-transform-materialized_train_split
metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
transform_output:
componentInputArtifact: pipelinechannel--automl-tabular-transform-transform_output
parameters:
deadline_hours:
taskOutputParameter:
outputParameterKey: stage_1_deadline_hours
producerTask: calculate-training-parameters-2
disable_early_stopping:
componentInputParameter: pipelinechannel--disable_early_stopping
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--location
num_parallel_trials:
componentInputParameter: pipelinechannel--stage_1_num_parallel_trials
num_selected_trials:
taskOutputParameter:
outputParameterKey: stage_1_num_selected_trials
producerTask: calculate-training-parameters-2
project:
componentInputParameter: pipelinechannel--project
reduce_search_space_mode:
taskOutputParameter:
outputParameterKey: reduce_search_space_mode
producerTask: calculate-training-parameters-2
root_dir:
componentInputParameter: pipelinechannel--root_dir
single_run_max_secs:
taskOutputParameter:
outputParameterKey: stage_1_single_run_max_secs
producerTask: calculate-training-parameters-2
study_spec_parameters_override:
componentInputParameter: pipelinechannel--study_spec_parameters_override
worker_pool_specs_override_json:
componentInputParameter: pipelinechannel--stage_1_tuner_worker_pool_specs_override
taskInfo:
name: automl-tabular-stage-1-tuner
bool-identity-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-bool-identity-2
inputs:
parameters:
value:
componentInputParameter: pipelinechannel--run_evaluation
taskInfo:
name: bool-identity-2
bool-identity-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-bool-identity-3
inputs:
parameters:
value:
componentInputParameter: pipelinechannel--run_distillation
taskInfo:
name: bool-identity-3
calculate-training-parameters-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-calculate-training-parameters-2
inputs:
parameters:
fast_testing:
componentInputParameter: pipelinechannel--fast_testing
is_skip_architecture_search:
runtimeValue:
constant: false
run_distillation:
componentInputParameter: pipelinechannel--run_distillation
stage_1_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_1_num_parallel_trials
stage_2_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_2_num_parallel_trials
train_budget_milli_node_hours:
componentInputParameter: pipelinechannel--train_budget_milli_node_hours
taskInfo:
name: calculate-training-parameters-2
condition-5:
componentRef:
name: comp-condition-5
dependentTasks:
- automl-tabular-ensemble-2
- bool-identity-2
- bool-identity-3
inputs:
artifacts:
pipelinechannel--automl-tabular-ensemble-2-explanation_metadata_artifact:
taskOutputArtifact:
outputArtifactKey: explanation_metadata_artifact
producerTask: automl-tabular-ensemble-2
pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble-2
pipelinechannel--parent_model:
componentInputArtifact: pipelinechannel--parent_model
parameters:
pipelinechannel--automl-tabular-ensemble-2-explanation_parameters:
taskOutputParameter:
outputParameterKey: explanation_parameters
producerTask: automl-tabular-ensemble-2
pipelinechannel--bool-identity-2-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: bool-identity-2
pipelinechannel--bool-identity-3-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: bool-identity-3
pipelinechannel--dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--evaluation_batch_explain_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
pipelinechannel--evaluation_batch_explain_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
pipelinechannel--evaluation_batch_explain_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
pipelinechannel--evaluation_batch_predict_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
pipelinechannel--evaluation_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
pipelinechannel--evaluation_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
pipelinechannel--evaluation_dataflow_starting_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
pipelinechannel--get-model-display-name-model_display_name:
componentInputParameter: pipelinechannel--get-model-display-name-model_display_name
pipelinechannel--location:
componentInputParameter: pipelinechannel--location
pipelinechannel--model_description:
componentInputParameter: pipelinechannel--model_description
pipelinechannel--prediction_type:
componentInputParameter: pipelinechannel--prediction_type
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--root_dir:
componentInputParameter: pipelinechannel--root_dir
pipelinechannel--string-not-empty-Output:
componentInputParameter: pipelinechannel--string-not-empty-Output
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
taskInfo:
name: no-distill
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--bool-identity-3-Output']
== 'false'
condition-7:
componentRef:
name: comp-condition-7
dependentTasks:
- automl-tabular-ensemble-2
- bool-identity-2
- bool-identity-3
- calculate-training-parameters-2
inputs:
artifacts:
pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble-2
pipelinechannel--tabular-stats-and-example-gen-dataset_schema:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-dataset_schema
pipelinechannel--tabular-stats-and-example-gen-eval_split:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-eval_split
pipelinechannel--tabular-stats-and-example-gen-instance_baseline:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-instance_baseline
pipelinechannel--tabular-stats-and-example-gen-metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
pipelinechannel--tabular-stats-and-example-gen-test_split:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-test_split
pipelinechannel--tabular-stats-and-example-gen-train_split:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-train_split
parameters:
pipelinechannel--bool-identity-2-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: bool-identity-2
pipelinechannel--bool-identity-3-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: bool-identity-3
pipelinechannel--calculate-training-parameters-2-distill_stage_1_deadline_hours:
taskOutputParameter:
outputParameterKey: distill_stage_1_deadline_hours
producerTask: calculate-training-parameters-2
pipelinechannel--calculate-training-parameters-2-reduce_search_space_mode:
taskOutputParameter:
outputParameterKey: reduce_search_space_mode
producerTask: calculate-training-parameters-2
pipelinechannel--calculate-training-parameters-2-stage_1_single_run_max_secs:
taskOutputParameter:
outputParameterKey: stage_1_single_run_max_secs
producerTask: calculate-training-parameters-2
pipelinechannel--dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
pipelinechannel--disable_early_stopping:
componentInputParameter: pipelinechannel--disable_early_stopping
pipelinechannel--distill_batch_predict_machine_type:
componentInputParameter: pipelinechannel--distill_batch_predict_machine_type
pipelinechannel--distill_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--distill_batch_predict_max_replica_count
pipelinechannel--distill_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--distill_batch_predict_starting_replica_count
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--evaluation_batch_explain_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
pipelinechannel--evaluation_batch_explain_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
pipelinechannel--evaluation_batch_explain_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
pipelinechannel--evaluation_batch_predict_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
pipelinechannel--evaluation_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
pipelinechannel--evaluation_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
pipelinechannel--evaluation_dataflow_starting_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
pipelinechannel--export_additional_model_without_custom_ops:
componentInputParameter: pipelinechannel--export_additional_model_without_custom_ops
pipelinechannel--location:
componentInputParameter: pipelinechannel--location
pipelinechannel--prediction_type:
componentInputParameter: pipelinechannel--prediction_type
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--root_dir:
componentInputParameter: pipelinechannel--root_dir
pipelinechannel--stage_1_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_1_num_parallel_trials
pipelinechannel--stage_1_tuner_worker_pool_specs_override:
componentInputParameter: pipelinechannel--stage_1_tuner_worker_pool_specs_override
pipelinechannel--string-not-empty-Output:
componentInputParameter: pipelinechannel--string-not-empty-Output
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--transform_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--transform_dataflow_disk_size_gb
pipelinechannel--transform_dataflow_machine_type:
componentInputParameter: pipelinechannel--transform_dataflow_machine_type
pipelinechannel--transform_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--transform_dataflow_max_num_workers
taskInfo:
name: is-distill
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--bool-identity-3-Output']
== 'true'
inputDefinitions:
artifacts:
pipelinechannel--automl-tabular-transform-materialized_eval_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--automl-tabular-transform-materialized_train_split:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--automl-tabular-transform-transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--merge-materialized-splits-splits:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--parent_model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-eval_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-instance_baseline:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-test_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-train_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
parameters:
pipelinechannel--cv_trainer_worker_pool_specs_override:
parameterType: LIST
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--disable_early_stopping:
parameterType: BOOLEAN
pipelinechannel--distill_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--distill_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--distill_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_explain_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_starting_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--export_additional_model_without_custom_ops:
parameterType: BOOLEAN
pipelinechannel--fast_testing:
parameterType: BOOLEAN
pipelinechannel--get-model-display-name-model_display_name:
parameterType: STRING
pipelinechannel--location:
parameterType: STRING
pipelinechannel--model_description:
parameterType: STRING
pipelinechannel--prediction_type:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--run_distillation:
parameterType: BOOLEAN
pipelinechannel--run_evaluation:
parameterType: BOOLEAN
pipelinechannel--stage_1_num_parallel_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--stage_1_tuner_worker_pool_specs_override:
parameterType: LIST
pipelinechannel--stage_2_num_parallel_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--stage_2_num_selected_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--string-not-empty-Output:
parameterType: STRING
pipelinechannel--study_spec_parameters_override:
parameterType: LIST
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
parameterType: LIST
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
parameterType: LIST
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--train_budget_milli_node_hours:
parameterType: NUMBER_DOUBLE
pipelinechannel--transform_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--transform_dataflow_machine_type:
parameterType: STRING
pipelinechannel--transform_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
feature-attribution-2-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
feature-attribution-3-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-2-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-3-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
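  # comp-condition-5 (no-distill branch): uploads the stage-2 ensemble as a Vertex model and,
  # when run_evaluation is true, runs the evaluation sub-DAG (condition-6).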
comp-condition-5:
dag:
outputs:
artifacts:
feature-attribution-2-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-2-feature_attributions
producerSubtask: condition-6
model-evaluation-2-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-2-evaluation_metrics
producerSubtask: condition-6
tasks:
condition-6:
componentRef:
name: comp-condition-6
dependentTasks:
- model-upload-2
inputs:
artifacts:
pipelinechannel--automl-tabular-ensemble-2-explanation_metadata_artifact:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-explanation_metadata_artifact
pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model
pipelinechannel--model-upload-2-model:
taskOutputArtifact:
outputArtifactKey: model
producerTask: model-upload-2
parameters:
pipelinechannel--automl-tabular-ensemble-2-explanation_parameters:
componentInputParameter: pipelinechannel--automl-tabular-ensemble-2-explanation_parameters
pipelinechannel--bool-identity-2-Output:
componentInputParameter: pipelinechannel--bool-identity-2-Output
pipelinechannel--bool-identity-3-Output:
componentInputParameter: pipelinechannel--bool-identity-3-Output
pipelinechannel--dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--evaluation_batch_explain_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
pipelinechannel--evaluation_batch_explain_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
pipelinechannel--evaluation_batch_explain_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
pipelinechannel--evaluation_batch_predict_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
pipelinechannel--evaluation_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
pipelinechannel--evaluation_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
pipelinechannel--evaluation_dataflow_starting_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
pipelinechannel--location:
componentInputParameter: pipelinechannel--location
pipelinechannel--prediction_type:
componentInputParameter: pipelinechannel--prediction_type
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--root_dir:
componentInputParameter: pipelinechannel--root_dir
pipelinechannel--string-not-empty-Output:
componentInputParameter: pipelinechannel--string-not-empty-Output
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
taskInfo:
name: is-evaluation
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--bool-identity-2-Output']
== 'true'
model-upload-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-upload-2
inputs:
artifacts:
explanation_metadata_artifact:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-explanation_metadata_artifact
parent_model:
componentInputArtifact: pipelinechannel--parent_model
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model
parameters:
description:
componentInputParameter: pipelinechannel--model_description
display_name:
componentInputParameter: pipelinechannel--get-model-display-name-model_display_name
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
explanation_parameters:
componentInputParameter: pipelinechannel--automl-tabular-ensemble-2-explanation_parameters
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: model-upload-2
inputDefinitions:
artifacts:
pipelinechannel--automl-tabular-ensemble-2-explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
pipelinechannel--parent_model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
pipelinechannel--automl-tabular-ensemble-2-explanation_parameters:
parameterType: STRUCT
pipelinechannel--bool-identity-2-Output:
parameterType: STRING
pipelinechannel--bool-identity-3-Output:
parameterType: STRING
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_explain_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_starting_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--get-model-display-name-model_display_name:
parameterType: STRING
pipelinechannel--location:
parameterType: STRING
pipelinechannel--model_description:
parameterType: STRING
pipelinechannel--prediction_type:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--string-not-empty-Output:
parameterType: STRING
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
parameterType: LIST
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
parameterType: LIST
pipelinechannel--target_column:
parameterType: STRING
outputDefinitions:
artifacts:
feature-attribution-2-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-2-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
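  # comp-condition-6: evaluation sub-DAG for the uploaded model — batch explanation and batch
  # prediction over the test split, feature attribution, model evaluation, and evaluation import.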
comp-condition-6:
dag:
outputs:
artifacts:
feature-attribution-2-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature_attributions
producerSubtask: feature-attribution-2
model-evaluation-2-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: evaluation_metrics
producerSubtask: model-evaluation-2
tasks:
feature-attribution-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-feature-attribution-2
dependentTasks:
- model-batch-explanation-2
inputs:
artifacts:
predictions_gcs_source:
taskOutputArtifact:
outputArtifactKey: gcs_output_directory
producerTask: model-batch-explanation-2
parameters:
dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
dataflow_max_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
dataflow_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
force_runner_mode:
runtimeValue:
constant: Dataflow
location:
componentInputParameter: pipelinechannel--location
predictions_format:
runtimeValue:
constant: jsonl
problem_type:
componentInputParameter: pipelinechannel--prediction_type
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: feature-attribution-2
model-batch-explanation-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-explanation-2
inputs:
artifacts:
explanation_metadata_artifact:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-explanation_metadata_artifact
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
explanation_parameters:
componentInputParameter: pipelinechannel--automl-tabular-ensemble-2-explanation_parameters
gcs_destination_output_uri_prefix:
componentInputParameter: pipelinechannel--root_dir
gcs_source_uris:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json
generate_explanation:
runtimeValue:
constant: true
instances_format:
runtimeValue:
constant: tf-record
job_display_name:
runtimeValue:
constant: batch-explain-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
predictions_format:
runtimeValue:
constant: jsonl
project:
componentInputParameter: pipelinechannel--project
starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
taskInfo:
name: model-batch-explanation-2
model-batch-predict-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-predict-2
inputs:
artifacts:
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
gcs_destination_output_uri_prefix:
componentInputParameter: pipelinechannel--root_dir
gcs_source_uris:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
instances_format:
runtimeValue:
constant: tf-record
job_display_name:
runtimeValue:
constant: batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
predictions_format:
runtimeValue:
constant: jsonl
project:
componentInputParameter: pipelinechannel--project
starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
taskInfo:
name: model-batch-predict-2
model-evaluation-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-evaluation-2
dependentTasks:
- model-batch-predict-2
inputs:
artifacts:
batch_prediction_job:
taskOutputArtifact:
outputArtifactKey: batchpredictionjob
producerTask: model-batch-predict-2
parameters:
dataflow_disk_size:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
dataflow_max_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
dataflow_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
ground_truth_column:
componentInputParameter: pipelinechannel--target_column
ground_truth_format:
runtimeValue:
constant: jsonl
location:
componentInputParameter: pipelinechannel--location
prediction_label_column:
runtimeValue:
constant: ''
prediction_score_column:
runtimeValue:
constant: ''
predictions_format:
runtimeValue:
constant: jsonl
problem_type:
componentInputParameter: pipelinechannel--prediction_type
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
taskInfo:
name: model-evaluation-2
model-evaluation-import-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-evaluation-import-2
dependentTasks:
- feature-attribution-2
- model-evaluation-2
inputs:
artifacts:
feature_attributions:
taskOutputArtifact:
outputArtifactKey: feature_attributions
producerTask: feature-attribution-2
metrics:
taskOutputArtifact:
outputArtifactKey: evaluation_metrics
producerTask: model-evaluation-2
model:
componentInputArtifact: pipelinechannel--model-upload-2-model
parameters:
dataset_paths:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
dataset_type:
runtimeValue:
constant: tf-record
display_name:
runtimeValue:
constant: AutoML Tabular
problem_type:
componentInputParameter: pipelinechannel--prediction_type
taskInfo:
name: model-evaluation-import-2
inputDefinitions:
artifacts:
pipelinechannel--automl-tabular-ensemble-2-explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
pipelinechannel--model-upload-2-model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
parameters:
pipelinechannel--automl-tabular-ensemble-2-explanation_parameters:
parameterType: STRUCT
pipelinechannel--bool-identity-2-Output:
parameterType: STRING
pipelinechannel--bool-identity-3-Output:
parameterType: STRING
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_explain_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_starting_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--location:
parameterType: STRING
pipelinechannel--prediction_type:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--string-not-empty-Output:
parameterType: STRING
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
parameterType: LIST
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
parameterType: LIST
pipelinechannel--target_column:
parameterType: STRING
outputDefinitions:
artifacts:
feature-attribution-2-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-2-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
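  # comp-condition-7 (is-distill branch): batch-predicts the train and eval splits with the
  # stage-2 ensemble, transforms those predictions, trains a single-trial distilled model,
  # ensembles and uploads it, and evaluates it when run_evaluation is true (condition-8).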
comp-condition-7:
dag:
outputs:
artifacts:
feature-attribution-3-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-3-feature_attributions
producerSubtask: condition-8
model-evaluation-3-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-3-evaluation_metrics
producerSubtask: condition-8
tasks:
automl-tabular-ensemble-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-ensemble-3
dependentTasks:
- automl-tabular-stage-1-tuner-2
- automl-tabular-transform-2
inputs:
artifacts:
dataset_schema:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-dataset_schema
instance_baseline:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-instance_baseline
metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
transform_output:
taskOutputArtifact:
outputArtifactKey: transform_output
producerTask: automl-tabular-transform-2
tuning_result_input:
taskOutputArtifact:
outputArtifactKey: tuning_result_output
producerTask: automl-tabular-stage-1-tuner-2
warmup_data:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-eval_split
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
export_additional_model_without_custom_ops:
componentInputParameter: pipelinechannel--export_additional_model_without_custom_ops
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
taskInfo:
name: automl-tabular-ensemble-3
automl-tabular-infra-validator-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-infra-validator-3
dependentTasks:
- automl-tabular-ensemble-3
inputs:
artifacts:
unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble-3
taskInfo:
name: automl-tabular-infra-validator-3
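        # Stage-1 tuner re-run in distillation mode (run_distillation: true) with a single
        # selected trial, trained on the materialized splits produced from the ensemble's predictions.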
automl-tabular-stage-1-tuner-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-stage-1-tuner-2
dependentTasks:
- automl-tabular-transform-2
inputs:
artifacts:
materialized_eval_split:
taskOutputArtifact:
outputArtifactKey: materialized_eval_split
producerTask: automl-tabular-transform-2
materialized_train_split:
taskOutputArtifact:
outputArtifactKey: materialized_train_split
producerTask: automl-tabular-transform-2
metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
transform_output:
taskOutputArtifact:
outputArtifactKey: transform_output
producerTask: automl-tabular-transform-2
parameters:
deadline_hours:
componentInputParameter: pipelinechannel--calculate-training-parameters-2-distill_stage_1_deadline_hours
disable_early_stopping:
componentInputParameter: pipelinechannel--disable_early_stopping
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--location
num_parallel_trials:
componentInputParameter: pipelinechannel--stage_1_num_parallel_trials
num_selected_trials:
runtimeValue:
constant: 1.0
project:
componentInputParameter: pipelinechannel--project
reduce_search_space_mode:
componentInputParameter: pipelinechannel--calculate-training-parameters-2-reduce_search_space_mode
root_dir:
componentInputParameter: pipelinechannel--root_dir
run_distillation:
runtimeValue:
constant: true
single_run_max_secs:
componentInputParameter: pipelinechannel--calculate-training-parameters-2-stage_1_single_run_max_secs
worker_pool_specs_override_json:
componentInputParameter: pipelinechannel--stage_1_tuner_worker_pool_specs_override
taskInfo:
name: automl-tabular-stage-1-tuner-2
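        # Re-runs the transform on the batch-prediction outputs (written by write-bp-result-path
        # and write-bp-result-path-2) to produce the distilled model's train and eval splits.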
automl-tabular-transform-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-transform-2
dependentTasks:
- write-bp-result-path
- write-bp-result-path-2
inputs:
artifacts:
dataset_schema:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-dataset_schema
eval_split:
taskOutputArtifact:
outputArtifactKey: result
producerTask: write-bp-result-path-2
metadata:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-metadata
test_split:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-test_split
train_split:
taskOutputArtifact:
outputArtifactKey: result
producerTask: write-bp-result-path
parameters:
dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--transform_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--transform_dataflow_machine_type
dataflow_max_num_workers:
componentInputParameter: pipelinechannel--transform_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
taskInfo:
name: automl-tabular-transform-2
condition-8:
componentRef:
name: comp-condition-8
dependentTasks:
- automl-tabular-ensemble-3
- model-upload-3
inputs:
artifacts:
pipelinechannel--automl-tabular-ensemble-3-explanation_metadata_artifact:
taskOutputArtifact:
outputArtifactKey: explanation_metadata_artifact
producerTask: automl-tabular-ensemble-3
pipelinechannel--automl-tabular-ensemble-3-unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble-3
pipelinechannel--model-upload-3-model:
taskOutputArtifact:
outputArtifactKey: model
producerTask: model-upload-3
parameters:
pipelinechannel--automl-tabular-ensemble-3-explanation_parameters:
taskOutputParameter:
outputParameterKey: explanation_parameters
producerTask: automl-tabular-ensemble-3
pipelinechannel--bool-identity-2-Output:
componentInputParameter: pipelinechannel--bool-identity-2-Output
pipelinechannel--bool-identity-3-Output:
componentInputParameter: pipelinechannel--bool-identity-3-Output
pipelinechannel--dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--evaluation_batch_explain_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
pipelinechannel--evaluation_batch_explain_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
pipelinechannel--evaluation_batch_explain_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
pipelinechannel--evaluation_batch_predict_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
pipelinechannel--evaluation_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
pipelinechannel--evaluation_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
pipelinechannel--evaluation_dataflow_starting_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
pipelinechannel--location:
componentInputParameter: pipelinechannel--location
pipelinechannel--prediction_type:
componentInputParameter: pipelinechannel--prediction_type
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--root_dir:
componentInputParameter: pipelinechannel--root_dir
pipelinechannel--string-not-empty-Output:
componentInputParameter: pipelinechannel--string-not-empty-Output
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
taskInfo:
name: is-evaluation
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--bool-identity-2-Output']
== 'true'
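        # Batch prediction over the original training split using the stage-2 ensemble; its
        # output becomes the training data for the distilled model.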
model-batch-predict-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-predict-3
dependentTasks:
- read-input-uri
inputs:
artifacts:
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
gcs_destination_output_uri_prefix:
componentInputParameter: pipelinechannel--root_dir
gcs_source_uris:
taskOutputParameter:
outputParameterKey: Output
producerTask: read-input-uri
instances_format:
runtimeValue:
constant: tf-record
job_display_name:
runtimeValue:
constant: batch-predict-train-split
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--distill_batch_predict_machine_type
max_replica_count:
componentInputParameter: pipelinechannel--distill_batch_predict_max_replica_count
predictions_format:
runtimeValue:
constant: tf-record
project:
componentInputParameter: pipelinechannel--project
starting_replica_count:
componentInputParameter: pipelinechannel--distill_batch_predict_starting_replica_count
taskInfo:
name: model-batch-predict-3
model-batch-predict-4:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-predict-4
dependentTasks:
- read-input-uri-2
inputs:
artifacts:
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
gcs_destination_output_uri_prefix:
componentInputParameter: pipelinechannel--root_dir
gcs_source_uris:
taskOutputParameter:
outputParameterKey: Output
producerTask: read-input-uri-2
instances_format:
runtimeValue:
constant: tf-record
job_display_name:
runtimeValue:
constant: batch-predict-eval-split
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--distill_batch_predict_machine_type
max_replica_count:
componentInputParameter: pipelinechannel--distill_batch_predict_max_replica_count
predictions_format:
runtimeValue:
constant: tf-record
project:
componentInputParameter: pipelinechannel--project
starting_replica_count:
componentInputParameter: pipelinechannel--distill_batch_predict_starting_replica_count
taskInfo:
name: model-batch-predict-4
model-upload-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-upload-3
dependentTasks:
- automl-tabular-ensemble-3
- automl-tabular-infra-validator-3
inputs:
artifacts:
explanation_metadata_artifact:
taskOutputArtifact:
outputArtifactKey: explanation_metadata_artifact
producerTask: automl-tabular-ensemble-3
unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: automl-tabular-ensemble-3
parameters:
display_name:
runtimeValue:
constant: automl-tabular-distill-model-upload-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
explanation_parameters:
taskOutputParameter:
outputParameterKey: explanation_parameters
producerTask: automl-tabular-ensemble-3
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: model-upload-3
read-input-uri:
cachingOptions:
enableCache: true
componentRef:
name: comp-read-input-uri
inputs:
artifacts:
split_uri:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-train_split
taskInfo:
name: read-input-uri
read-input-uri-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-read-input-uri-2
inputs:
artifacts:
split_uri:
componentInputArtifact: pipelinechannel--tabular-stats-and-example-gen-eval_split
taskInfo:
name: read-input-uri-2
write-bp-result-path:
cachingOptions:
enableCache: true
componentRef:
name: comp-write-bp-result-path
dependentTasks:
- model-batch-predict-3
inputs:
artifacts:
bp_job:
taskOutputArtifact:
outputArtifactKey: batchpredictionjob
producerTask: model-batch-predict-3
taskInfo:
name: write-bp-result-path
write-bp-result-path-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-write-bp-result-path-2
dependentTasks:
- model-batch-predict-4
inputs:
artifacts:
bp_job:
taskOutputArtifact:
outputArtifactKey: batchpredictionjob
producerTask: model-batch-predict-4
taskInfo:
name: write-bp-result-path-2
inputDefinitions:
artifacts:
pipelinechannel--automl-tabular-ensemble-2-unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-eval_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-instance_baseline:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-test_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
pipelinechannel--tabular-stats-and-example-gen-train_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
parameters:
pipelinechannel--bool-identity-2-Output:
parameterType: STRING
pipelinechannel--bool-identity-3-Output:
parameterType: STRING
pipelinechannel--calculate-training-parameters-2-distill_stage_1_deadline_hours:
parameterType: NUMBER_DOUBLE
pipelinechannel--calculate-training-parameters-2-reduce_search_space_mode:
parameterType: STRING
pipelinechannel--calculate-training-parameters-2-stage_1_single_run_max_secs:
parameterType: NUMBER_INTEGER
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--disable_early_stopping:
parameterType: BOOLEAN
pipelinechannel--distill_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--distill_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--distill_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_explain_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_starting_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--export_additional_model_without_custom_ops:
parameterType: BOOLEAN
pipelinechannel--location:
parameterType: STRING
pipelinechannel--prediction_type:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--stage_1_num_parallel_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--stage_1_tuner_worker_pool_specs_override:
parameterType: LIST
pipelinechannel--string-not-empty-Output:
parameterType: STRING
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
parameterType: LIST
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
parameterType: LIST
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--transform_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--transform_dataflow_machine_type:
parameterType: STRING
pipelinechannel--transform_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
feature-attribution-3-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-3-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
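  # comp-condition-8: evaluation sub-DAG for the distilled model — batch explanation, batch
  # prediction, feature attribution, model evaluation, and evaluation import.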
comp-condition-8:
dag:
outputs:
artifacts:
feature-attribution-3-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature_attributions
producerSubtask: feature-attribution-3
model-evaluation-3-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: evaluation_metrics
producerSubtask: model-evaluation-3
tasks:
feature-attribution-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-feature-attribution-3
dependentTasks:
- model-batch-explanation-3
inputs:
artifacts:
predictions_gcs_source:
taskOutputArtifact:
outputArtifactKey: gcs_output_directory
producerTask: model-batch-explanation-3
parameters:
dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
dataflow_max_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
dataflow_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
force_runner_mode:
runtimeValue:
constant: Dataflow
location:
componentInputParameter: pipelinechannel--location
predictions_format:
runtimeValue:
constant: jsonl
problem_type:
componentInputParameter: pipelinechannel--prediction_type
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: feature-attribution-3
model-batch-explanation-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-explanation-3
inputs:
artifacts:
explanation_metadata_artifact:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-3-explanation_metadata_artifact
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-3-unmanaged_container_model
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
explanation_parameters:
componentInputParameter: pipelinechannel--automl-tabular-ensemble-3-explanation_parameters
gcs_destination_output_uri_prefix:
componentInputParameter: pipelinechannel--root_dir
gcs_source_uris:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json
generate_explanation:
runtimeValue:
constant: true
instances_format:
runtimeValue:
constant: tf-record
job_display_name:
runtimeValue:
constant: batch-explain-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
predictions_format:
runtimeValue:
constant: jsonl
project:
componentInputParameter: pipelinechannel--project
starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
taskInfo:
name: model-batch-explanation-3
model-batch-predict-5:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-predict-5
inputs:
artifacts:
unmanaged_container_model:
componentInputArtifact: pipelinechannel--automl-tabular-ensemble-3-unmanaged_container_model
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
gcs_destination_output_uri_prefix:
componentInputParameter: pipelinechannel--root_dir
gcs_source_uris:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
instances_format:
runtimeValue:
constant: tf-record
job_display_name:
runtimeValue:
constant: batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
predictions_format:
runtimeValue:
constant: jsonl
project:
componentInputParameter: pipelinechannel--project
starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
taskInfo:
name: model-batch-predict-5
model-evaluation-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-evaluation-3
dependentTasks:
- model-batch-predict-5
inputs:
artifacts:
batch_prediction_job:
taskOutputArtifact:
outputArtifactKey: batchpredictionjob
producerTask: model-batch-predict-5
parameters:
dataflow_disk_size:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
dataflow_max_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
dataflow_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
ground_truth_column:
componentInputParameter: pipelinechannel--target_column
ground_truth_format:
runtimeValue:
constant: jsonl
location:
componentInputParameter: pipelinechannel--location
prediction_label_column:
runtimeValue:
constant: ''
prediction_score_column:
runtimeValue:
constant: ''
predictions_format:
runtimeValue:
constant: jsonl
problem_type:
componentInputParameter: pipelinechannel--prediction_type
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
taskInfo:
name: model-evaluation-3
model-evaluation-import-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-evaluation-import-3
dependentTasks:
- feature-attribution-3
- model-evaluation-3
inputs:
artifacts:
feature_attributions:
taskOutputArtifact:
outputArtifactKey: feature_attributions
producerTask: feature-attribution-3
metrics:
taskOutputArtifact:
outputArtifactKey: evaluation_metrics
producerTask: model-evaluation-3
model:
componentInputArtifact: pipelinechannel--model-upload-3-model
parameters:
dataset_paths:
componentInputParameter: pipelinechannel--tabular-stats-and-example-gen-test_split_json
dataset_type:
runtimeValue:
constant: tf-record
display_name:
runtimeValue:
constant: AutoML Tabular
problem_type:
componentInputParameter: pipelinechannel--prediction_type
taskInfo:
name: model-evaluation-import-3
inputDefinitions:
artifacts:
pipelinechannel--automl-tabular-ensemble-3-explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
pipelinechannel--automl-tabular-ensemble-3-unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
pipelinechannel--model-upload-3-model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
parameters:
pipelinechannel--automl-tabular-ensemble-3-explanation_parameters:
parameterType: STRUCT
pipelinechannel--bool-identity-2-Output:
parameterType: STRING
pipelinechannel--bool-identity-3-Output:
parameterType: STRING
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_explain_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_starting_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--location:
parameterType: STRING
pipelinechannel--prediction_type:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--string-not-empty-Output:
parameterType: STRING
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
parameterType: LIST
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
parameterType: LIST
pipelinechannel--target_column:
parameterType: STRING
outputDefinitions:
artifacts:
feature-attribution-3-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-3-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
comp-exit-handler-1:
dag:
outputs:
artifacts:
feature-attribution-2-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-2-feature_attributions
producerSubtask: condition-4
feature-attribution-3-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-3-feature_attributions
producerSubtask: condition-4
feature-attribution-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-feature_attributions
producerSubtask: condition-2
model-evaluation-2-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-2-evaluation_metrics
producerSubtask: condition-4
model-evaluation-3-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-3-evaluation_metrics
producerSubtask: condition-4
model-evaluation-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-evaluation_metrics
producerSubtask: condition-2
tasks:
automl-tabular-transform:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-transform
dependentTasks:
- tabular-stats-and-example-gen
inputs:
artifacts:
dataset_schema:
taskOutputArtifact:
outputArtifactKey: dataset_schema
producerTask: tabular-stats-and-example-gen
eval_split:
taskOutputArtifact:
outputArtifactKey: eval_split
producerTask: tabular-stats-and-example-gen
metadata:
taskOutputArtifact:
outputArtifactKey: metadata
producerTask: tabular-stats-and-example-gen
test_split:
taskOutputArtifact:
outputArtifactKey: test_split
producerTask: tabular-stats-and-example-gen
train_split:
taskOutputArtifact:
outputArtifactKey: train_split
producerTask: tabular-stats-and-example-gen
parameters:
dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--transform_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--transform_dataflow_machine_type
dataflow_max_num_workers:
componentInputParameter: pipelinechannel--transform_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
taskInfo:
name: automl-tabular-transform
condition-2:
componentRef:
name: comp-condition-2
dependentTasks:
- automl-tabular-transform
- merge-materialized-splits
- string-not-empty
- tabular-stats-and-example-gen
inputs:
artifacts:
pipelinechannel--automl-tabular-transform-transform_output:
taskOutputArtifact:
outputArtifactKey: transform_output
producerTask: automl-tabular-transform
pipelinechannel--merge-materialized-splits-splits:
taskOutputArtifact:
outputArtifactKey: splits
producerTask: merge-materialized-splits
pipelinechannel--parent_model:
componentInputArtifact: pipelinechannel--parent_model
pipelinechannel--tabular-stats-and-example-gen-dataset_schema:
taskOutputArtifact:
outputArtifactKey: dataset_schema
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-eval_split:
taskOutputArtifact:
outputArtifactKey: eval_split
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-instance_baseline:
taskOutputArtifact:
outputArtifactKey: instance_baseline
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-metadata:
taskOutputArtifact:
outputArtifactKey: metadata
producerTask: tabular-stats-and-example-gen
parameters:
pipelinechannel--cv_trainer_worker_pool_specs_override:
componentInputParameter: pipelinechannel--cv_trainer_worker_pool_specs_override
pipelinechannel--dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--evaluation_batch_explain_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
pipelinechannel--evaluation_batch_explain_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
pipelinechannel--evaluation_batch_explain_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
pipelinechannel--evaluation_batch_predict_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
pipelinechannel--evaluation_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
pipelinechannel--evaluation_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
pipelinechannel--evaluation_dataflow_starting_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
pipelinechannel--export_additional_model_without_custom_ops:
componentInputParameter: pipelinechannel--export_additional_model_without_custom_ops
pipelinechannel--fast_testing:
componentInputParameter: pipelinechannel--fast_testing
pipelinechannel--get-model-display-name-model_display_name:
componentInputParameter: pipelinechannel--get-model-display-name-model_display_name
pipelinechannel--location:
componentInputParameter: pipelinechannel--location
pipelinechannel--model_description:
componentInputParameter: pipelinechannel--model_description
pipelinechannel--prediction_type:
componentInputParameter: pipelinechannel--prediction_type
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--root_dir:
componentInputParameter: pipelinechannel--root_dir
pipelinechannel--run_distillation:
componentInputParameter: pipelinechannel--run_distillation
pipelinechannel--run_evaluation:
componentInputParameter: pipelinechannel--run_evaluation
pipelinechannel--stage_1_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_1_num_parallel_trials
pipelinechannel--stage_1_tuning_result_artifact_uri:
componentInputParameter: pipelinechannel--stage_1_tuning_result_artifact_uri
pipelinechannel--stage_2_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_2_num_parallel_trials
pipelinechannel--stage_2_num_selected_trials:
componentInputParameter: pipelinechannel--stage_2_num_selected_trials
pipelinechannel--string-not-empty-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: string-not-empty
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
taskOutputParameter:
outputParameterKey: downsampled_test_split_json
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
taskOutputParameter:
outputParameterKey: test_split_json
producerTask: tabular-stats-and-example-gen
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--train_budget_milli_node_hours:
componentInputParameter: pipelinechannel--train_budget_milli_node_hours
taskInfo:
name: stage_1_tuning_result_artifact_uri_not_empty
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--string-not-empty-Output']
== 'true'
condition-4:
componentRef:
name: comp-condition-4
dependentTasks:
- automl-tabular-transform
- merge-materialized-splits
- string-not-empty
- tabular-stats-and-example-gen
inputs:
artifacts:
pipelinechannel--automl-tabular-transform-materialized_eval_split:
taskOutputArtifact:
outputArtifactKey: materialized_eval_split
producerTask: automl-tabular-transform
pipelinechannel--automl-tabular-transform-materialized_train_split:
taskOutputArtifact:
outputArtifactKey: materialized_train_split
producerTask: automl-tabular-transform
pipelinechannel--automl-tabular-transform-transform_output:
taskOutputArtifact:
outputArtifactKey: transform_output
producerTask: automl-tabular-transform
pipelinechannel--merge-materialized-splits-splits:
taskOutputArtifact:
outputArtifactKey: splits
producerTask: merge-materialized-splits
pipelinechannel--parent_model:
componentInputArtifact: pipelinechannel--parent_model
pipelinechannel--tabular-stats-and-example-gen-dataset_schema:
taskOutputArtifact:
outputArtifactKey: dataset_schema
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-eval_split:
taskOutputArtifact:
outputArtifactKey: eval_split
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-instance_baseline:
taskOutputArtifact:
outputArtifactKey: instance_baseline
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-metadata:
taskOutputArtifact:
outputArtifactKey: metadata
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-test_split:
taskOutputArtifact:
outputArtifactKey: test_split
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-train_split:
taskOutputArtifact:
outputArtifactKey: train_split
producerTask: tabular-stats-and-example-gen
parameters:
pipelinechannel--cv_trainer_worker_pool_specs_override:
componentInputParameter: pipelinechannel--cv_trainer_worker_pool_specs_override
pipelinechannel--dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
pipelinechannel--disable_early_stopping:
componentInputParameter: pipelinechannel--disable_early_stopping
pipelinechannel--distill_batch_predict_machine_type:
componentInputParameter: pipelinechannel--distill_batch_predict_machine_type
pipelinechannel--distill_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--distill_batch_predict_max_replica_count
pipelinechannel--distill_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--distill_batch_predict_starting_replica_count
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--evaluation_batch_explain_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_explain_machine_type
pipelinechannel--evaluation_batch_explain_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_max_replica_count
pipelinechannel--evaluation_batch_explain_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_explain_starting_replica_count
pipelinechannel--evaluation_batch_predict_machine_type:
componentInputParameter: pipelinechannel--evaluation_batch_predict_machine_type
pipelinechannel--evaluation_batch_predict_max_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_max_replica_count
pipelinechannel--evaluation_batch_predict_starting_replica_count:
componentInputParameter: pipelinechannel--evaluation_batch_predict_starting_replica_count
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
pipelinechannel--evaluation_dataflow_starting_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_starting_num_workers
pipelinechannel--export_additional_model_without_custom_ops:
componentInputParameter: pipelinechannel--export_additional_model_without_custom_ops
pipelinechannel--fast_testing:
componentInputParameter: pipelinechannel--fast_testing
pipelinechannel--get-model-display-name-model_display_name:
componentInputParameter: pipelinechannel--get-model-display-name-model_display_name
pipelinechannel--location:
componentInputParameter: pipelinechannel--location
pipelinechannel--model_description:
componentInputParameter: pipelinechannel--model_description
pipelinechannel--prediction_type:
componentInputParameter: pipelinechannel--prediction_type
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--root_dir:
componentInputParameter: pipelinechannel--root_dir
pipelinechannel--run_distillation:
componentInputParameter: pipelinechannel--run_distillation
pipelinechannel--run_evaluation:
componentInputParameter: pipelinechannel--run_evaluation
pipelinechannel--stage_1_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_1_num_parallel_trials
pipelinechannel--stage_1_tuner_worker_pool_specs_override:
componentInputParameter: pipelinechannel--stage_1_tuner_worker_pool_specs_override
pipelinechannel--stage_2_num_parallel_trials:
componentInputParameter: pipelinechannel--stage_2_num_parallel_trials
pipelinechannel--stage_2_num_selected_trials:
componentInputParameter: pipelinechannel--stage_2_num_selected_trials
pipelinechannel--string-not-empty-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: string-not-empty
pipelinechannel--study_spec_parameters_override:
componentInputParameter: pipelinechannel--study_spec_parameters_override
pipelinechannel--tabular-stats-and-example-gen-downsampled_test_split_json:
taskOutputParameter:
outputParameterKey: downsampled_test_split_json
producerTask: tabular-stats-and-example-gen
pipelinechannel--tabular-stats-and-example-gen-test_split_json:
taskOutputParameter:
outputParameterKey: test_split_json
producerTask: tabular-stats-and-example-gen
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--train_budget_milli_node_hours:
componentInputParameter: pipelinechannel--train_budget_milli_node_hours
pipelinechannel--transform_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--transform_dataflow_disk_size_gb
pipelinechannel--transform_dataflow_machine_type:
componentInputParameter: pipelinechannel--transform_dataflow_machine_type
pipelinechannel--transform_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--transform_dataflow_max_num_workers
taskInfo:
name: stage_1_tuning_result_artifact_uri_empty
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--string-not-empty-Output']
== 'false'
merge-materialized-splits:
cachingOptions:
enableCache: true
componentRef:
name: comp-merge-materialized-splits
dependentTasks:
- automl-tabular-transform
inputs:
artifacts:
split_0:
taskOutputArtifact:
outputArtifactKey: materialized_train_split
producerTask: automl-tabular-transform
split_1:
taskOutputArtifact:
outputArtifactKey: materialized_eval_split
producerTask: automl-tabular-transform
taskInfo:
name: merge-materialized-splits
string-not-empty:
cachingOptions:
enableCache: true
componentRef:
name: comp-string-not-empty
inputs:
parameters:
value:
componentInputParameter: pipelinechannel--stage_1_tuning_result_artifact_uri
taskInfo:
name: string-not-empty
tabular-stats-and-example-gen:
cachingOptions:
enableCache: true
componentRef:
name: comp-tabular-stats-and-example-gen
inputs:
parameters:
additional_experiments_json:
componentInputParameter: pipelinechannel--additional_experiments
data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--set-optional-inputs-data_source_bigquery_table_path
data_source_csv_filenames:
componentInputParameter: pipelinechannel--set-optional-inputs-data_source_csv_filenames
dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--stats_and_example_gen_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--stats_and_example_gen_dataflow_machine_type
dataflow_max_num_workers:
componentInputParameter: pipelinechannel--stats_and_example_gen_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
enable_probabilistic_inference:
componentInputParameter: pipelinechannel--enable_probabilistic_inference
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--location
optimization_objective:
componentInputParameter: pipelinechannel--optimization_objective
optimization_objective_precision_value:
componentInputParameter: pipelinechannel--optimization_objective_precision_value
optimization_objective_recall_value:
componentInputParameter: pipelinechannel--optimization_objective_recall_value
predefined_split_key:
componentInputParameter: pipelinechannel--predefined_split_key
prediction_type:
componentInputParameter: pipelinechannel--prediction_type
project:
componentInputParameter: pipelinechannel--project
quantiles:
componentInputParameter: pipelinechannel--quantiles
root_dir:
componentInputParameter: pipelinechannel--root_dir
run_distillation:
componentInputParameter: pipelinechannel--run_distillation
stratified_split_key:
componentInputParameter: pipelinechannel--stratified_split_key
target_column_name:
componentInputParameter: pipelinechannel--target_column
test_fraction:
componentInputParameter: pipelinechannel--test_fraction
timestamp_split_key:
componentInputParameter: pipelinechannel--timestamp_split_key
training_fraction:
componentInputParameter: pipelinechannel--training_fraction
transformations:
runtimeValue:
constant: '[]'
transformations_path:
componentInputParameter: pipelinechannel--transformations
validation_fraction:
componentInputParameter: pipelinechannel--validation_fraction
weight_column_name:
componentInputParameter: pipelinechannel--weight_column
taskInfo:
name: tabular-stats-and-example-gen
inputDefinitions:
artifacts:
pipelinechannel--parent_model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
pipelinechannel--additional_experiments:
parameterType: STRUCT
pipelinechannel--cv_trainer_worker_pool_specs_override:
parameterType: LIST
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--disable_early_stopping:
parameterType: BOOLEAN
pipelinechannel--distill_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--distill_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--distill_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--enable_probabilistic_inference:
parameterType: BOOLEAN
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_explain_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_explain_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_machine_type:
parameterType: STRING
pipelinechannel--evaluation_batch_predict_max_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_batch_predict_starting_replica_count:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_starting_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--export_additional_model_without_custom_ops:
parameterType: BOOLEAN
pipelinechannel--fast_testing:
parameterType: BOOLEAN
pipelinechannel--get-model-display-name-model_display_name:
parameterType: STRING
pipelinechannel--location:
parameterType: STRING
pipelinechannel--model_description:
parameterType: STRING
pipelinechannel--optimization_objective:
parameterType: STRING
pipelinechannel--optimization_objective_precision_value:
parameterType: NUMBER_DOUBLE
pipelinechannel--optimization_objective_recall_value:
parameterType: NUMBER_DOUBLE
pipelinechannel--predefined_split_key:
parameterType: STRING
pipelinechannel--prediction_type:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--quantiles:
parameterType: LIST
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--run_distillation:
parameterType: BOOLEAN
pipelinechannel--run_evaluation:
parameterType: BOOLEAN
pipelinechannel--set-optional-inputs-data_source_bigquery_table_path:
parameterType: STRING
pipelinechannel--set-optional-inputs-data_source_csv_filenames:
parameterType: STRING
pipelinechannel--stage_1_num_parallel_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--stage_1_tuner_worker_pool_specs_override:
parameterType: LIST
pipelinechannel--stage_1_tuning_result_artifact_uri:
parameterType: STRING
pipelinechannel--stage_2_num_parallel_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--stage_2_num_selected_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--stats_and_example_gen_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--stats_and_example_gen_dataflow_machine_type:
parameterType: STRING
pipelinechannel--stats_and_example_gen_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--stratified_split_key:
parameterType: STRING
pipelinechannel--study_spec_parameters_override:
parameterType: LIST
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--test_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--timestamp_split_key:
parameterType: STRING
pipelinechannel--train_budget_milli_node_hours:
parameterType: NUMBER_DOUBLE
pipelinechannel--training_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--transform_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--transform_dataflow_machine_type:
parameterType: STRING
pipelinechannel--transform_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--transformations:
parameterType: STRING
pipelinechannel--validation_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--weight_column:
parameterType: STRING
outputDefinitions:
artifacts:
feature-attribution-2-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
feature-attribution-3-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
feature-attribution-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-2-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-3-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
comp-feature-attribution:
executorLabel: exec-feature-attribution
inputDefinitions:
artifacts:
predictions_bigquery_source:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
isOptional: true
predictions_gcs_source:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
parameters:
dataflow_disk_size_gb:
defaultValue: 50.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-4
isOptional: true
parameterType: STRING
dataflow_max_workers_num:
defaultValue: 5.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
isOptional: true
parameterType: BOOLEAN
dataflow_workers_num:
defaultValue: 1.0
isOptional: true
parameterType: NUMBER_INTEGER
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
force_runner_mode:
defaultValue: ''
isOptional: true
parameterType: STRING
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
problem_type:
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
isOptional: true
parameterType: STRING
outputDefinitions:
artifacts:
feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the dataflow
job. For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-feature-attribution-2:
executorLabel: exec-feature-attribution-2
inputDefinitions:
artifacts:
predictions_bigquery_source:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
isOptional: true
predictions_gcs_source:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
parameters:
dataflow_disk_size_gb:
defaultValue: 50.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-4
isOptional: true
parameterType: STRING
dataflow_max_workers_num:
defaultValue: 5.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
isOptional: true
parameterType: BOOLEAN
dataflow_workers_num:
defaultValue: 1.0
isOptional: true
parameterType: NUMBER_INTEGER
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
force_runner_mode:
defaultValue: ''
isOptional: true
parameterType: STRING
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
problem_type:
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
isOptional: true
parameterType: STRING
outputDefinitions:
artifacts:
feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the dataflow
job. For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-feature-attribution-3:
executorLabel: exec-feature-attribution-3
inputDefinitions:
artifacts:
predictions_bigquery_source:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
isOptional: true
predictions_gcs_source:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
parameters:
dataflow_disk_size_gb:
defaultValue: 50.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-4
isOptional: true
parameterType: STRING
dataflow_max_workers_num:
defaultValue: 5.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
isOptional: true
parameterType: BOOLEAN
dataflow_workers_num:
defaultValue: 1.0
isOptional: true
parameterType: NUMBER_INTEGER
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
force_runner_mode:
defaultValue: ''
isOptional: true
parameterType: STRING
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
problem_type:
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
isOptional: true
parameterType: STRING
outputDefinitions:
artifacts:
feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the dataflow
job. For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-get-model-display-name:
executorLabel: exec-get-model-display-name
inputDefinitions:
parameters:
model_display_name:
parameterType: STRING
outputDefinitions:
parameters:
model_display_name:
parameterType: STRING
comp-importer:
executorLabel: exec-importer
inputDefinitions:
parameters:
uri:
parameterType: STRING
outputDefinitions:
artifacts:
artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
comp-merge-materialized-splits:
executorLabel: exec-merge-materialized-splits
inputDefinitions:
artifacts:
split_0:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The first materialized split.
split_1:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The second materialized split.
outputDefinitions:
artifacts:
splits:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
comp-model-batch-explanation:
executorLabel: exec-model-batch-explanation
inputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
isOptional: true
parameterType: STRING
bigquery_source_input_uri:
defaultValue: ''
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
explanation_metadata:
defaultValue: {}
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
isOptional: true
parameterType: STRING
gcs_source_uris:
defaultValue: []
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
instances_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
job_display_name:
parameterType: STRING
labels:
defaultValue: {}
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
project:
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-model-batch-explanation-2:
executorLabel: exec-model-batch-explanation-2
inputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
isOptional: true
parameterType: STRING
bigquery_source_input_uri:
defaultValue: ''
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
explanation_metadata:
defaultValue: {}
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
isOptional: true
parameterType: STRING
gcs_source_uris:
defaultValue: []
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
instances_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
job_display_name:
parameterType: STRING
labels:
defaultValue: {}
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
project:
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-model-batch-explanation-3:
executorLabel: exec-model-batch-explanation-3
inputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
isOptional: true
parameterType: STRING
bigquery_source_input_uri:
defaultValue: ''
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
explanation_metadata:
defaultValue: {}
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
isOptional: true
parameterType: STRING
gcs_source_uris:
defaultValue: []
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
instances_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
job_display_name:
parameterType: STRING
labels:
defaultValue: {}
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
project:
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-model-batch-predict:
executorLabel: exec-model-batch-predict
inputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'The Model used to get predictions via this job. Must share
the same
ancestor Location. Starting this job has no impact on any existing
deployments of the Model and their resources. Either this or
`unmanaged_container_model` must be specified.'
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: 'The unmanaged container model used to get predictions via
this job.
This should be used for models that are not uploaded to Vertex. Either
this or model must be specified.'
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
description: 'The number of accelerators to attach
to the `machine_type`. Only used if `machine_type` is set. For more
details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
description: 'The type of accelerator(s) that may be
attached to the machine as per `accelerator_count`. Only used if
`machine_type` is set. For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
description: 'The BigQuery project location where the output is to be written
to. In
the given project a new dataset is created with name
            `prediction_<model-display-name>_<job-create-time>` where `<model-display-name>` is made
BigQuery-dataset-name compatible (for example, most special characters
become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ
"based on ISO-8601" format. In the dataset two tables will be created,
`predictions`, and `errors`. If the Model has both `instance`
and `prediction` schemata defined then the tables have columns as
follows: The `predictions` table contains instances for which the
prediction succeeded, it has columns as per a concatenation of the
Model''s instance and prediction schemata. The `errors` table
contains rows for which the prediction has failed, it has instance
columns, as per the instance schema, followed by a single "errors"
            column, which as values has google.rpc.Status
represented as a STRUCT, and containing only `code` and
`message`. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
bigquery_source_input_uri:
defaultValue: ''
description: 'BigQuery URI to a table, up to 2000 characters long. For example:
`projectId.bqDatasetId.bqTableId` For more details about this input
config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: 'Customer-managed encryption
key options for a BatchPredictionJob. If this is set, then all
resources created by the BatchPredictionJob will be encrypted with the
provided encryption key. Has the form:
`projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`.
The key needs to be in the same region as where the compute resource
is created.'
isOptional: true
parameterType: STRING
excluded_fields:
defaultValue: []
description: 'Fields that will be excluded in the prediction instance that
is
sent to the Model.
Excluded will be attached to the batch prediction output if
key_field is not specified.
When `excluded_fields` is populated, `included_fields` must be empty.
The input must be JSONL with objects at each line, CSV, BigQuery
            or TfRecord.'
isOptional: true
parameterType: LIST
explanation_metadata:
defaultValue: {}
description: 'Explanation metadata
configuration for this BatchPredictionJob. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_metadata`. All fields of
`explanation_metadata` are optional in the request. If a field of the
`explanation_metadata` object is not populated, the corresponding
field of the `Model.explanation_metadata` object is inherited. For
more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.'
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
description: 'Parameters to configure
explaining for Model''s predictions. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_parameters`. All fields of
`explanation_parameters` are optional in the request. If a field of
the `explanation_parameters` object is not populated, the
corresponding field of the `Model.explanation_parameters` object is
inherited. For more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.'
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
description: 'The Google Cloud
Storage location of the directory where the output is to be written
to. In the given directory a new directory is created. Its name is
`prediction-<model-display-name>-<job-create-time>`, where timestamp
is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files
`predictions_0001.<extension>`, `predictions_0002.<extension>`,
..., `predictions_N.<extension>` are created where `<extension>`
depends on chosen `predictions_format`, and N may equal 0001 and
depends on the total number of successfully predicted instances. If
the Model has both `instance` and `prediction` schemata defined
then each such file contains predictions as per the
`predictions_format`. If prediction for any instance failed
(partially or completely), then an additional
`errors_0001.<extension>`, `errors_0002.<extension>`,...,
`errors_N.<extension>` files are created (N depends on total number
of failed predictions). These files contain the failed instances, as
per their schema, followed by an additional `error` field which as
value has `google.rpc.Status` containing only `code` and
`message` fields. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
gcs_source_uris:
defaultValue: []
description: 'Google Cloud Storage URI(-s) to your instances to run batch
prediction
on. They must match `instances_format`. May contain wildcards. For more
information on wildcards, see [WildcardNames](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).
For more details about this input config, see [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
description: 'Generate explanation along with
the batch prediction results. This will cause the batch prediction
output to include explanations based on the `prediction_format`: -
`bigquery`: output includes a column named `explanation`. The value is
a struct that conforms to the [aiplatform.gapic.Explanation] object. -
`jsonl`: The JSON objects on each line include an additional entry
keyed `explanation`. The value of the entry is a JSON object that
conforms to the [aiplatform.gapic.Explanation] object. - `csv`:
Generating explanations for CSV format is not supported. If this
field is set to true, either the Model.explanation_spec or
explanation_metadata and explanation_parameters must be populated.'
isOptional: true
parameterType: BOOLEAN
included_fields:
defaultValue: []
description: 'Fields that will be included in the prediction instance that
is
sent to the Model.
If `instance_type` is `array`, the order of field names in
`included_fields` also determines the order of the values in the array.
When `included_fields` is populated, `excluded_fields` must be empty.
The input must be JSONL with objects at each line, CSV, BigQuery
or TfRecord.'
isOptional: true
parameterType: LIST
instance_type:
defaultValue: ''
description: "The format of the instance that the Model\naccepts. Vertex\
\ AI will convert compatible\n[InstancesFormat](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\n\
to the specified format. Supported values are:\n`object`: Each input is\
\ converted to JSON object format.\n * For `bigquery`, each row is converted\
\ to an object.\n * For `jsonl`, each line of the JSONL input must be\
\ an object.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\
\ `tf-record-gzip`.\n`array`: Each input is converted to JSON array format.\n\
\ * For `bigquery`, each row is converted to an array. The order\n \
\ of columns is determined by the BigQuery column order, unless\n \
\ [included_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\
\ is populated.\n `included_fields` must be populated for specifying\
\ field orders.\n * For `jsonl`, if each line of the JSONL input is an\
\ object,\n `included_fields` must be populated for specifying field\
\ orders.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\n\
\ `tf-record-gzip`.\nIf not specified, Vertex AI converts the batch\
\ prediction input as\nfollows:\n * For `bigquery` and `csv`, the behavior\
\ is the same as `array`. The\n order of columns is the same as defined\
\ in the file or table, unless\n included_fields is populated.\n * For\
\ `jsonl`, the prediction instance format is determined by\n each line\
\ of the input.\n * For `tf-record`/`tf-record-gzip`, each record will\
\ be converted to\n an object in the format of `{\"b64\": <value>}`,\
\ where `<value>` is\n the Base64-encoded string of the content of the\
\ record.\n * For `file-list`, each file in the list will be converted\
\ to an\n object in the format of `{\"b64\": <value>}`, where `<value>`\
\ is\n the Base64-encoded string of the content of the file."
isOptional: true
parameterType: STRING
instances_format:
defaultValue: jsonl
description: 'The format in which instances are
given, must be one of the [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models)''s
supportedInputStorageFormats.
For more details about this input config, see
[InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.)'
isOptional: true
parameterType: STRING
job_display_name:
description: The user-defined name of this BatchPredictionJob.
parameterType: STRING
key_field:
defaultValue: ''
description: "The name of the field that is considered as a key.\nThe values\
            \ identified by the key field are not included in the\ntransformed instances\
            \ that are sent to the Model. This is similar to\nspecifying this name\
\ of the field in [excluded_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).\
\ In addition,\nthe batch prediction output will not include the instances.\
\ Instead the\noutput will only include the value of the key field, in\
\ a field named\n`key` in the output:\n * For `jsonl` output format, the\
\ output will have a `key` field\n instead of the `instance` field.\n\
            \ * For `csv`/`bigquery` output format, the output will have a `key`\n\
\ column instead of the instance feature columns.\nThe input must be\
\ JSONL with objects at each line, CSV, BigQuery\nor TfRecord."
isOptional: true
parameterType: STRING
labels:
defaultValue: {}
description: 'The labels with user-defined metadata to
organize your BatchPredictionJobs. Label keys and values can be no
longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for
more information and examples of labels.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: Location for creating the BatchPredictionJob.
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
description: 'The type of machine for running batch
prediction on dedicated resources. If the Model supports
DEDICATED_RESOURCES this config may be provided (and the job will use
these resources). If the Model doesn''t support AUTOMATIC_RESOURCES,
this config must be provided. For more details about the
BatchDedicatedResources, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources.
For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
description: 'The number of
the records (e.g. instances) of the operation given in each batch to a
machine replica. Machine type, and size of a single record should be
considered when setting this parameter, higher value speeds up the
batch operation''s execution, but too high value will result in a whole
batch not fitting in a machine''s memory, and the whole operation will
fail.'
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
description: 'The maximum number of machine replicas the batch operation
may be scaled
to. Only used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
          description: The parameters that govern the predictions. The schema of the
            parameters may be specified via the Model's `parameters_schema_uri`.
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
description: 'The format in which Vertex AI gives the predictions. Must
be one of the
Model''s supportedOutputStorageFormats.
For more details about this output config, see [OutputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig).'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to create the BatchPredictionJob. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
description: 'The number of machine replicas
used at the start of the batch operation. If not set, Vertex AI
            decides the starting number, not greater than `max_replica_count`. Only
used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
description: '[**Deprecated. Use gcs_output_directory and bigquery_output_table
instead.**] Artifact
representation of the created batch prediction job.'
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
            bigquery_destination_output_uri is specified.'
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
gcs_destination_output_uri_prefix is specified.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the batch prediction
job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-model-batch-predict-2:
executorLabel: exec-model-batch-predict-2
inputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'The Model used to get predictions via this job. Must share
the same
ancestor Location. Starting this job has no impact on any existing
deployments of the Model and their resources. Either this or
`unmanaged_container_model` must be specified.'
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: 'The unmanaged container model used to get predictions via
this job.
This should be used for models that are not uploaded to Vertex. Either
this or model must be specified.'
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
description: 'The number of accelerators to attach
to the `machine_type`. Only used if `machine_type` is set. For more
details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
description: 'The type of accelerator(s) that may be
attached to the machine as per `accelerator_count`. Only used if
`machine_type` is set. For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
description: 'The BigQuery project location where the output is to be written
to. In
the given project a new dataset is created with name
            `prediction_<model-display-name>_<job-create-time>` where the model display name is made
BigQuery-dataset-name compatible (for example, most special characters
become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ
"based on ISO-8601" format. In the dataset two tables will be created,
`predictions`, and `errors`. If the Model has both `instance`
and `prediction` schemata defined then the tables have columns as
follows: The `predictions` table contains instances for which the
prediction succeeded, it has columns as per a concatenation of the
Model''s instance and prediction schemata. The `errors` table
contains rows for which the prediction has failed, it has instance
columns, as per the instance schema, followed by a single "errors"
            column, which as values has `google.rpc.Status`
represented as a STRUCT, and containing only `code` and
`message`. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
bigquery_source_input_uri:
defaultValue: ''
description: 'BigQuery URI to a table, up to 2000 characters long. For example:
`projectId.bqDatasetId.bqTableId` For more details about this input
config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: 'Customer-managed encryption
key options for a BatchPredictionJob. If this is set, then all
resources created by the BatchPredictionJob will be encrypted with the
provided encryption key. Has the form:
`projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`.
The key needs to be in the same region as where the compute resource
is created.'
isOptional: true
parameterType: STRING
excluded_fields:
defaultValue: []
description: 'Fields that will be excluded in the prediction instance that
            is sent to the Model.
            The excluded fields will be attached to the batch prediction output if
            `key_field` is not specified.
            When `excluded_fields` is populated, `included_fields` must be empty.
            The input must be JSONL with objects at each line, CSV, BigQuery
            or TfRecord.'
isOptional: true
parameterType: LIST
explanation_metadata:
defaultValue: {}
description: 'Explanation metadata
configuration for this BatchPredictionJob. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_metadata`. All fields of
`explanation_metadata` are optional in the request. If a field of the
`explanation_metadata` object is not populated, the corresponding
field of the `Model.explanation_metadata` object is inherited. For
more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.'
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
description: 'Parameters to configure
explaining for Model''s predictions. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_parameters`. All fields of
`explanation_parameters` are optional in the request. If a field of
the `explanation_parameters` object is not populated, the
corresponding field of the `Model.explanation_parameters` object is
inherited. For more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.'
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
description: 'The Google Cloud
Storage location of the directory where the output is to be written
to. In the given directory a new directory is created. Its name is
`prediction-<model-display-name>-<job-create-time>`, where timestamp
is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files
`predictions_0001.<extension>`, `predictions_0002.<extension>`,
..., `predictions_N.<extension>` are created where `<extension>`
depends on chosen `predictions_format`, and N may equal 0001 and
depends on the total number of successfully predicted instances. If
the Model has both `instance` and `prediction` schemata defined
then each such file contains predictions as per the
`predictions_format`. If prediction for any instance failed
(partially or completely), then an additional
`errors_0001.<extension>`, `errors_0002.<extension>`,...,
`errors_N.<extension>` files are created (N depends on total number
of failed predictions). These files contain the failed instances, as
per their schema, followed by an additional `error` field which as
value has `google.rpc.Status` containing only `code` and
`message` fields. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
gcs_source_uris:
defaultValue: []
description: 'Google Cloud Storage URI(-s) to your instances to run batch
prediction
on. They must match `instances_format`. May contain wildcards. For more
information on wildcards, see [WildcardNames](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).
For more details about this input config, see [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
description: 'Generate explanation along with
the batch prediction results. This will cause the batch prediction
output to include explanations based on the `prediction_format`: -
`bigquery`: output includes a column named `explanation`. The value is
a struct that conforms to the [aiplatform.gapic.Explanation] object. -
`jsonl`: The JSON objects on each line include an additional entry
keyed `explanation`. The value of the entry is a JSON object that
conforms to the [aiplatform.gapic.Explanation] object. - `csv`:
Generating explanations for CSV format is not supported. If this
field is set to true, either the Model.explanation_spec or
explanation_metadata and explanation_parameters must be populated.'
isOptional: true
parameterType: BOOLEAN
included_fields:
defaultValue: []
description: 'Fields that will be included in the prediction instance that
is
sent to the Model.
If `instance_type` is `array`, the order of field names in
`included_fields` also determines the order of the values in the array.
When `included_fields` is populated, `excluded_fields` must be empty.
The input must be JSONL with objects at each line, CSV, BigQuery
or TfRecord.'
isOptional: true
parameterType: LIST
instance_type:
defaultValue: ''
description: "The format of the instance that the Model\naccepts. Vertex\
\ AI will convert compatible\n[InstancesFormat](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\n\
to the specified format. Supported values are:\n`object`: Each input is\
\ converted to JSON object format.\n * For `bigquery`, each row is converted\
\ to an object.\n * For `jsonl`, each line of the JSONL input must be\
\ an object.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\
\ `tf-record-gzip`.\n`array`: Each input is converted to JSON array format.\n\
\ * For `bigquery`, each row is converted to an array. The order\n \
\ of columns is determined by the BigQuery column order, unless\n \
\ [included_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\
\ is populated.\n `included_fields` must be populated for specifying\
\ field orders.\n * For `jsonl`, if each line of the JSONL input is an\
\ object,\n `included_fields` must be populated for specifying field\
\ orders.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\n\
\ `tf-record-gzip`.\nIf not specified, Vertex AI converts the batch\
\ prediction input as\nfollows:\n * For `bigquery` and `csv`, the behavior\
\ is the same as `array`. The\n order of columns is the same as defined\
\ in the file or table, unless\n included_fields is populated.\n * For\
\ `jsonl`, the prediction instance format is determined by\n each line\
\ of the input.\n * For `tf-record`/`tf-record-gzip`, each record will\
\ be converted to\n an object in the format of `{\"b64\": <value>}`,\
\ where `<value>` is\n the Base64-encoded string of the content of the\
\ record.\n * For `file-list`, each file in the list will be converted\
\ to an\n object in the format of `{\"b64\": <value>}`, where `<value>`\
\ is\n the Base64-encoded string of the content of the file."
isOptional: true
parameterType: STRING
instances_format:
defaultValue: jsonl
description: 'The format in which instances are
given, must be one of the [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models)''s
supportedInputStorageFormats.
For more details about this input config, see
            [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: STRING
job_display_name:
description: The user-defined name of this BatchPredictionJob.
parameterType: STRING
key_field:
defaultValue: ''
description: "The name of the field that is considered as a key.\nThe values\
            \ identified by the key field are not included in the\ntransformed instances\
            \ that are sent to the Model. This is similar to\nspecifying the name\
            \ of this field in [excluded_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).\
\ In addition,\nthe batch prediction output will not include the instances.\
\ Instead the\noutput will only include the value of the key field, in\
\ a field named\n`key` in the output:\n * For `jsonl` output format, the\
\ output will have a `key` field\n instead of the `instance` field.\n\
            \ * For `csv`/`bigquery` output format, the output will have a `key`\n\
\ column instead of the instance feature columns.\nThe input must be\
\ JSONL with objects at each line, CSV, BigQuery\nor TfRecord."
isOptional: true
parameterType: STRING
labels:
defaultValue: {}
description: 'The labels with user-defined metadata to
organize your BatchPredictionJobs. Label keys and values can be no
longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for
more information and examples of labels.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: Location for creating the BatchPredictionJob.
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
description: 'The type of machine for running batch
prediction on dedicated resources. If the Model supports
DEDICATED_RESOURCES this config may be provided (and the job will use
these resources). If the Model doesn''t support AUTOMATIC_RESOURCES,
this config must be provided. For more details about the
BatchDedicatedResources, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources.
For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
description: 'The number of
the records (e.g. instances) of the operation given in each batch to a
machine replica. Machine type, and size of a single record should be
            considered when setting this parameter; a higher value speeds up the
            batch operation''s execution, but too high a value will result in a whole
batch not fitting in a machine''s memory, and the whole operation will
fail.'
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
description: 'The maximum number of machine replicas the batch operation
may be scaled
to. Only used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
description: The parameters that govern the predictions. The schema of the
            parameters may be specified via the Model's `parameters_schema_uri`.
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
description: 'The format in which Vertex AI gives the predictions. Must
be one of the
Model''s supportedOutputStorageFormats.
For more details about this output config, see [OutputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig).'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to create the BatchPredictionJob. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
description: 'The number of machine replicas
used at the start of the batch operation. If not set, Vertex AI
            decides the starting number, not greater than `max_replica_count`. Only
used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
description: '[**Deprecated. Use gcs_output_directory and bigquery_output_table
instead.**] Artifact
representation of the created batch prediction job.'
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
            bigquery_destination_output_uri is specified.'
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
gcs_destination_output_uri_prefix is specified.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the batch prediction
job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
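  # Editorial note (YAML comment only; not consumed by the KFP backend): the
  # comp-model-batch-predict-* definitions in this compiled spec are emitted by the
  # KFP compiler and are normally produced from pipeline code rather than edited by
  # hand. As a rough sketch, assuming the GCPC v1 SDK and hypothetical project and
  # bucket names, a pipeline author would typically generate such a component with
  # something like:
  #
  #   from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
  #
  #   ModelBatchPredictOp(
  #       project="my-project",                      # hypothetical project id
  #       location="us-central1",
  #       job_display_name="example-batch-predict",  # hypothetical display name
  #       model=model_task.outputs["model"],         # model_task: hypothetical upstream task producing a google.VertexModel
  #       instances_format="jsonl",
  #       gcs_source_uris=["gs://my-bucket/instances/*.jsonl"],        # hypothetical bucket
  #       gcs_destination_output_uri_prefix="gs://my-bucket/output/",  # hypothetical bucket
  #   )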
comp-model-batch-predict-3:
executorLabel: exec-model-batch-predict-3
inputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'The Model used to get predictions via this job. Must share
the same
ancestor Location. Starting this job has no impact on any existing
deployments of the Model and their resources. Either this or
`unmanaged_container_model` must be specified.'
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: 'The unmanaged container model used to get predictions via
this job.
This should be used for models that are not uploaded to Vertex. Either
this or model must be specified.'
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
description: 'The number of accelerators to attach
to the `machine_type`. Only used if `machine_type` is set. For more
details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
description: 'The type of accelerator(s) that may be
attached to the machine as per `accelerator_count`. Only used if
`machine_type` is set. For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
description: 'The BigQuery project location where the output is to be written
to. In
the given project a new dataset is created with name
            `prediction_<model-display-name>_<job-create-time>` where the model display name is made
BigQuery-dataset-name compatible (for example, most special characters
become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ
"based on ISO-8601" format. In the dataset two tables will be created,
`predictions`, and `errors`. If the Model has both `instance`
and `prediction` schemata defined then the tables have columns as
follows: The `predictions` table contains instances for which the
prediction succeeded, it has columns as per a concatenation of the
Model''s instance and prediction schemata. The `errors` table
contains rows for which the prediction has failed, it has instance
columns, as per the instance schema, followed by a single "errors"
            column, which as values has `google.rpc.Status`
represented as a STRUCT, and containing only `code` and
`message`. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
bigquery_source_input_uri:
defaultValue: ''
description: 'BigQuery URI to a table, up to 2000 characters long. For example:
`projectId.bqDatasetId.bqTableId` For more details about this input
config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: 'Customer-managed encryption
key options for a BatchPredictionJob. If this is set, then all
resources created by the BatchPredictionJob will be encrypted with the
provided encryption key. Has the form:
`projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`.
The key needs to be in the same region as where the compute resource
is created.'
isOptional: true
parameterType: STRING
excluded_fields:
defaultValue: []
description: 'Fields that will be excluded in the prediction instance that
            is sent to the Model.
            The excluded fields will be attached to the batch prediction output if
            `key_field` is not specified.
            When `excluded_fields` is populated, `included_fields` must be empty.
            The input must be JSONL with objects at each line, CSV, BigQuery
            or TfRecord.'
isOptional: true
parameterType: LIST
explanation_metadata:
defaultValue: {}
description: 'Explanation metadata
configuration for this BatchPredictionJob. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_metadata`. All fields of
`explanation_metadata` are optional in the request. If a field of the
`explanation_metadata` object is not populated, the corresponding
field of the `Model.explanation_metadata` object is inherited. For
more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.'
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
description: 'Parameters to configure
explaining for Model''s predictions. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_parameters`. All fields of
`explanation_parameters` are optional in the request. If a field of
the `explanation_parameters` object is not populated, the
corresponding field of the `Model.explanation_parameters` object is
inherited. For more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.'
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
description: 'The Google Cloud
Storage location of the directory where the output is to be written
to. In the given directory a new directory is created. Its name is
`prediction-<model-display-name>-<job-create-time>`, where timestamp
is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files
`predictions_0001.<extension>`, `predictions_0002.<extension>`,
..., `predictions_N.<extension>` are created where `<extension>`
depends on chosen `predictions_format`, and N may equal 0001 and
depends on the total number of successfully predicted instances. If
the Model has both `instance` and `prediction` schemata defined
then each such file contains predictions as per the
`predictions_format`. If prediction for any instance failed
(partially or completely), then an additional
`errors_0001.<extension>`, `errors_0002.<extension>`,...,
`errors_N.<extension>` files are created (N depends on total number
of failed predictions). These files contain the failed instances, as
per their schema, followed by an additional `error` field which as
value has `google.rpc.Status` containing only `code` and
`message` fields. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
gcs_source_uris:
defaultValue: []
description: 'Google Cloud Storage URI(-s) to your instances to run batch
prediction
on. They must match `instances_format`. May contain wildcards. For more
information on wildcards, see [WildcardNames](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).
For more details about this input config, see [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
description: 'Generate explanation along with
the batch prediction results. This will cause the batch prediction
output to include explanations based on the `prediction_format`: -
`bigquery`: output includes a column named `explanation`. The value is
a struct that conforms to the [aiplatform.gapic.Explanation] object. -
`jsonl`: The JSON objects on each line include an additional entry
keyed `explanation`. The value of the entry is a JSON object that
conforms to the [aiplatform.gapic.Explanation] object. - `csv`:
Generating explanations for CSV format is not supported. If this
field is set to true, either the Model.explanation_spec or
explanation_metadata and explanation_parameters must be populated.'
isOptional: true
parameterType: BOOLEAN
included_fields:
defaultValue: []
description: 'Fields that will be included in the prediction instance that
is
sent to the Model.
If `instance_type` is `array`, the order of field names in
`included_fields` also determines the order of the values in the array.
When `included_fields` is populated, `excluded_fields` must be empty.
The input must be JSONL with objects at each line, CSV, BigQuery
or TfRecord.'
isOptional: true
parameterType: LIST
instance_type:
defaultValue: ''
description: "The format of the instance that the Model\naccepts. Vertex\
\ AI will convert compatible\n[InstancesFormat](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\n\
to the specified format. Supported values are:\n`object`: Each input is\
\ converted to JSON object format.\n * For `bigquery`, each row is converted\
\ to an object.\n * For `jsonl`, each line of the JSONL input must be\
\ an object.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\
\ `tf-record-gzip`.\n`array`: Each input is converted to JSON array format.\n\
\ * For `bigquery`, each row is converted to an array. The order\n \
\ of columns is determined by the BigQuery column order, unless\n \
\ [included_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\
\ is populated.\n `included_fields` must be populated for specifying\
\ field orders.\n * For `jsonl`, if each line of the JSONL input is an\
\ object,\n `included_fields` must be populated for specifying field\
\ orders.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\n\
\ `tf-record-gzip`.\nIf not specified, Vertex AI converts the batch\
\ prediction input as\nfollows:\n * For `bigquery` and `csv`, the behavior\
\ is the same as `array`. The\n order of columns is the same as defined\
\ in the file or table, unless\n included_fields is populated.\n * For\
\ `jsonl`, the prediction instance format is determined by\n each line\
\ of the input.\n * For `tf-record`/`tf-record-gzip`, each record will\
\ be converted to\n an object in the format of `{\"b64\": <value>}`,\
\ where `<value>` is\n the Base64-encoded string of the content of the\
\ record.\n * For `file-list`, each file in the list will be converted\
\ to an\n object in the format of `{\"b64\": <value>}`, where `<value>`\
\ is\n the Base64-encoded string of the content of the file."
isOptional: true
parameterType: STRING
instances_format:
defaultValue: jsonl
description: 'The format in which instances are
given, must be one of the [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models)''s
supportedInputStorageFormats.
For more details about this input config, see
            [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: STRING
job_display_name:
description: The user-defined name of this BatchPredictionJob.
parameterType: STRING
key_field:
defaultValue: ''
description: "The name of the field that is considered as a key.\nThe values\
            \ identified by the key field are not included in the\ntransformed instances\
            \ that are sent to the Model. This is similar to\nspecifying the name\
            \ of this field in [excluded_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).\
\ In addition,\nthe batch prediction output will not include the instances.\
\ Instead the\noutput will only include the value of the key field, in\
\ a field named\n`key` in the output:\n * For `jsonl` output format, the\
\ output will have a `key` field\n instead of the `instance` field.\n\
            \ * For `csv`/`bigquery` output format, the output will have a `key`\n\
\ column instead of the instance feature columns.\nThe input must be\
\ JSONL with objects at each line, CSV, BigQuery\nor TfRecord."
isOptional: true
parameterType: STRING
labels:
defaultValue: {}
description: 'The labels with user-defined metadata to
organize your BatchPredictionJobs. Label keys and values can be no
longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for
more information and examples of labels.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: Location for creating the BatchPredictionJob.
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
description: 'The type of machine for running batch
prediction on dedicated resources. If the Model supports
DEDICATED_RESOURCES this config may be provided (and the job will use
these resources). If the Model doesn''t support AUTOMATIC_RESOURCES,
this config must be provided. For more details about the
BatchDedicatedResources, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources.
For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
description: 'The number of
the records (e.g. instances) of the operation given in each batch to a
machine replica. Machine type, and size of a single record should be
            considered when setting this parameter; a higher value speeds up the
            batch operation''s execution, but too high a value will result in a whole
batch not fitting in a machine''s memory, and the whole operation will
fail.'
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
description: 'The maximum number of machine replicas the batch operation
may be scaled
to. Only used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
description: The parameters that govern the predictions. The schema of the
            parameters may be specified via the Model's `parameters_schema_uri`.
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
description: 'The format in which Vertex AI gives the predictions. Must
be one of the
Model''s supportedOutputStorageFormats.
For more details about this output config, see [OutputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig).'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to create the BatchPredictionJob. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
description: 'The number of machine replicas
used at the start of the batch operation. If not set, Vertex AI
            decides the starting number, not greater than `max_replica_count`. Only
used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
description: '[**Deprecated. Use gcs_output_directory and bigquery_output_table
instead.**] Artifact
representation of the created batch prediction job.'
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
            bigquery_destination_output_uri is specified.'
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
gcs_destination_output_uri_prefix is specified.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the batch prediction
job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
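  # Editorial note (YAML comment only): a minimal illustration of the key_field
  # behaviour described in the parameter docs above, using hypothetical field names.
  # With instances_format "jsonl", key_field "user_id", and an input line such as
  #   {"user_id": "u1", "feature_a": 3.2, "feature_b": 0.7}
  # the corresponding output line takes roughly the form
  #   {"key": "u1", "prediction": ...}
  # i.e. the echoed instance is replaced by the key value in a field named `key`.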
comp-model-batch-predict-4:
executorLabel: exec-model-batch-predict-4
inputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'The Model used to get predictions via this job. Must share
the same
ancestor Location. Starting this job has no impact on any existing
deployments of the Model and their resources. Either this or
`unmanaged_container_model` must be specified.'
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: 'The unmanaged container model used to get predictions via
this job.
This should be used for models that are not uploaded to Vertex. Either
this or model must be specified.'
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
description: 'The number of accelerators to attach
to the `machine_type`. Only used if `machine_type` is set. For more
details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
description: 'The type of accelerator(s) that may be
attached to the machine as per `accelerator_count`. Only used if
`machine_type` is set. For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
description: 'The BigQuery project location where the output is to be written
to. In
the given project a new dataset is created with name
            `prediction_<model-display-name>_<job-create-time>` where the model display name is made
BigQuery-dataset-name compatible (for example, most special characters
become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ
"based on ISO-8601" format. In the dataset two tables will be created,
`predictions`, and `errors`. If the Model has both `instance`
and `prediction` schemata defined then the tables have columns as
follows: The `predictions` table contains instances for which the
prediction succeeded, it has columns as per a concatenation of the
Model''s instance and prediction schemata. The `errors` table
contains rows for which the prediction has failed, it has instance
columns, as per the instance schema, followed by a single "errors"
            column, which as values has `google.rpc.Status`
represented as a STRUCT, and containing only `code` and
`message`. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
bigquery_source_input_uri:
defaultValue: ''
description: 'BigQuery URI to a table, up to 2000 characters long. For example:
`projectId.bqDatasetId.bqTableId` For more details about this input
config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: 'Customer-managed encryption
key options for a BatchPredictionJob. If this is set, then all
resources created by the BatchPredictionJob will be encrypted with the
provided encryption key. Has the form:
`projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`.
The key needs to be in the same region as where the compute resource
is created.'
isOptional: true
parameterType: STRING
excluded_fields:
defaultValue: []
description: 'Fields that will be excluded in the prediction instance that
            is sent to the Model.
            The excluded fields will be attached to the batch prediction output if
            `key_field` is not specified.
            When `excluded_fields` is populated, `included_fields` must be empty.
            The input must be JSONL with objects at each line, CSV, BigQuery
            or TfRecord.'
isOptional: true
parameterType: LIST
explanation_metadata:
defaultValue: {}
description: 'Explanation metadata
configuration for this BatchPredictionJob. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_metadata`. All fields of
`explanation_metadata` are optional in the request. If a field of the
`explanation_metadata` object is not populated, the corresponding
field of the `Model.explanation_metadata` object is inherited. For
more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.'
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
description: 'Parameters to configure
explaining for Model''s predictions. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_parameters`. All fields of
`explanation_parameters` are optional in the request. If a field of
the `explanation_parameters` object is not populated, the
corresponding field of the `Model.explanation_parameters` object is
inherited. For more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.'
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
description: 'The Google Cloud
Storage location of the directory where the output is to be written
to. In the given directory a new directory is created. Its name is
`prediction-<model-display-name>-<job-create-time>`, where timestamp
is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files
`predictions_0001.<extension>`, `predictions_0002.<extension>`,
..., `predictions_N.<extension>` are created where `<extension>`
depends on chosen `predictions_format`, and N may equal 0001 and
depends on the total number of successfully predicted instances. If
the Model has both `instance` and `prediction` schemata defined
then each such file contains predictions as per the
`predictions_format`. If prediction for any instance failed
(partially or completely), then an additional
`errors_0001.<extension>`, `errors_0002.<extension>`,...,
`errors_N.<extension>` files are created (N depends on total number
of failed predictions). These files contain the failed instances, as
per their schema, followed by an additional `error` field which as
value has `google.rpc.Status` containing only `code` and
`message` fields. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
gcs_source_uris:
defaultValue: []
description: 'Google Cloud Storage URI(-s) to your instances to run batch
prediction
on. They must match `instances_format`. May contain wildcards. For more
information on wildcards, see [WildcardNames](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).
For more details about this input config, see [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
description: 'Generate explanation along with
the batch prediction results. This will cause the batch prediction
output to include explanations based on the `prediction_format`: -
`bigquery`: output includes a column named `explanation`. The value is
a struct that conforms to the [aiplatform.gapic.Explanation] object. -
`jsonl`: The JSON objects on each line include an additional entry
keyed `explanation`. The value of the entry is a JSON object that
conforms to the [aiplatform.gapic.Explanation] object. - `csv`:
Generating explanations for CSV format is not supported. If this
field is set to true, either the Model.explanation_spec or
explanation_metadata and explanation_parameters must be populated.'
isOptional: true
parameterType: BOOLEAN
included_fields:
defaultValue: []
description: 'Fields that will be included in the prediction instance that
is
sent to the Model.
If `instance_type` is `array`, the order of field names in
`included_fields` also determines the order of the values in the array.
When `included_fields` is populated, `excluded_fields` must be empty.
The input must be JSONL with objects at each line, CSV, BigQuery
or TfRecord.'
isOptional: true
parameterType: LIST
instance_type:
defaultValue: ''
description: "The format of the instance that the Model\naccepts. Vertex\
\ AI will convert compatible\n[InstancesFormat](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\n\
to the specified format. Supported values are:\n`object`: Each input is\
\ converted to JSON object format.\n * For `bigquery`, each row is converted\
\ to an object.\n * For `jsonl`, each line of the JSONL input must be\
\ an object.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\
\ `tf-record-gzip`.\n`array`: Each input is converted to JSON array format.\n\
\ * For `bigquery`, each row is converted to an array. The order\n \
\ of columns is determined by the BigQuery column order, unless\n \
\ [included_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\
\ is populated.\n `included_fields` must be populated for specifying\
\ field orders.\n * For `jsonl`, if each line of the JSONL input is an\
\ object,\n `included_fields` must be populated for specifying field\
\ orders.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\n\
\ `tf-record-gzip`.\nIf not specified, Vertex AI converts the batch\
\ prediction input as\nfollows:\n * For `bigquery` and `csv`, the behavior\
\ is the same as `array`. The\n order of columns is the same as defined\
\ in the file or table, unless\n included_fields is populated.\n * For\
\ `jsonl`, the prediction instance format is determined by\n each line\
\ of the input.\n * For `tf-record`/`tf-record-gzip`, each record will\
\ be converted to\n an object in the format of `{\"b64\": <value>}`,\
\ where `<value>` is\n the Base64-encoded string of the content of the\
\ record.\n * For `file-list`, each file in the list will be converted\
\ to an\n object in the format of `{\"b64\": <value>}`, where `<value>`\
\ is\n the Base64-encoded string of the content of the file."
isOptional: true
parameterType: STRING
instances_format:
defaultValue: jsonl
description: 'The format in which instances are
given, must be one of the [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models)''s
supportedInputStorageFormats.
For more details about this input config, see
            [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: STRING
job_display_name:
description: The user-defined name of this BatchPredictionJob.
parameterType: STRING
key_field:
defaultValue: ''
description: "The name of the field that is considered as a key.\nThe values\
            \ identified by the key field are not included in the\ntransformed instances\
            \ that are sent to the Model. This is similar to\nspecifying the name\
            \ of this field in [excluded_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).\
\ In addition,\nthe batch prediction output will not include the instances.\
\ Instead the\noutput will only include the value of the key field, in\
\ a field named\n`key` in the output:\n * For `jsonl` output format, the\
\ output will have a `key` field\n instead of the `instance` field.\n\
            \ * For `csv`/`bigquery` output format, the output will have a `key`\n\
\ column instead of the instance feature columns.\nThe input must be\
\ JSONL with objects at each line, CSV, BigQuery\nor TfRecord."
isOptional: true
parameterType: STRING
labels:
defaultValue: {}
description: 'The labels with user-defined metadata to
organize your BatchPredictionJobs. Label keys and values can be no
longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for
more information and examples of labels.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: Location for creating the BatchPredictionJob.
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
description: 'The type of machine for running batch
prediction on dedicated resources. If the Model supports
DEDICATED_RESOURCES this config may be provided (and the job will use
these resources). If the Model doesn''t support AUTOMATIC_RESOURCES,
this config must be provided. For more details about the
BatchDedicatedResources, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources.
For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
description: 'The number of
the records (e.g. instances) of the operation given in each batch to a
machine replica. Machine type, and size of a single record should be
            considered when setting this parameter; a higher value speeds up the
            batch operation''s execution, but too high a value will result in a whole
batch not fitting in a machine''s memory, and the whole operation will
fail.'
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
description: 'The maximum number of machine replicas the batch operation
may be scaled
to. Only used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
description: The parameters that govern the predictions. The schema of the
            parameters may be specified via the Model's `parameters_schema_uri`.
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
description: 'The format in which Vertex AI gives the predictions. Must
be one of the
Model''s supportedOutputStorageFormats.
For more details about this output config, see [OutputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig).'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to create the BatchPredictionJob. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
description: 'The number of machine replicas
used at the start of the batch operation. If not set, Vertex AI
            decides the starting number, not greater than `max_replica_count`. Only
used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
description: '[**Deprecated. Use gcs_output_directory and bigquery_output_table
instead.**] Artifact
representation of the created batch prediction job.'
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
            bigquery_destination_output_uri is specified.'
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
gcs_destination_output_uri_prefix is specified.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the batch prediction
job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-model-batch-predict-5:
executorLabel: exec-model-batch-predict-5
inputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'The Model used to get predictions via this job. Must share
the same
ancestor Location. Starting this job has no impact on any existing
deployments of the Model and their resources. Either this or
`unmanaged_container_model` must be specified.'
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: 'The unmanaged container model used to get predictions via
this job.
This should be used for models that are not uploaded to Vertex. Either
this or model must be specified.'
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
description: 'The number of accelerators to attach
to the `machine_type`. Only used if `machine_type` is set. For more
details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
description: 'The type of accelerator(s) that may be
attached to the machine as per `accelerator_count`. Only used if
`machine_type` is set. For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
description: 'The BigQuery project location where the output is to be written
to. In
the given project a new dataset is created with name
            `prediction_<model-display-name>_<job-create-time>` where the model display name is made
BigQuery-dataset-name compatible (for example, most special characters
become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ
"based on ISO-8601" format. In the dataset two tables will be created,
`predictions`, and `errors`. If the Model has both `instance`
and `prediction` schemata defined then the tables have columns as
follows: The `predictions` table contains instances for which the
prediction succeeded, it has columns as per a concatenation of the
Model''s instance and prediction schemata. The `errors` table
contains rows for which the prediction has failed, it has instance
columns, as per the instance schema, followed by a single "errors"
            column, which as values has `google.rpc.Status`
represented as a STRUCT, and containing only `code` and
`message`. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
bigquery_source_input_uri:
defaultValue: ''
description: 'BigQuery URI to a table, up to 2000 characters long. For example:
`projectId.bqDatasetId.bqTableId` For more details about this input
config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: 'Customer-managed encryption
key options for a BatchPredictionJob. If this is set, then all
resources created by the BatchPredictionJob will be encrypted with the
provided encryption key. Has the form:
`projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`.
The key needs to be in the same region as where the compute resource
is created.'
isOptional: true
parameterType: STRING
excluded_fields:
defaultValue: []
description: 'Fields that will be excluded in the prediction instance that
            is sent to the Model.
            The excluded fields will be attached to the batch prediction output if
            `key_field` is not specified.
            When `excluded_fields` is populated, `included_fields` must be empty.
            The input must be JSONL with objects at each line, CSV, BigQuery
            or TfRecord.'
isOptional: true
parameterType: LIST
explanation_metadata:
defaultValue: {}
description: 'Explanation metadata
configuration for this BatchPredictionJob. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_metadata`. All fields of
`explanation_metadata` are optional in the request. If a field of the
`explanation_metadata` object is not populated, the corresponding
field of the `Model.explanation_metadata` object is inherited. For
more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.'
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
description: 'Parameters to configure
explaining for Model''s predictions. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_parameters`. All fields of
`explanation_parameters` are optional in the request. If a field of
the `explanation_parameters` object is not populated, the
corresponding field of the `Model.explanation_parameters` object is
inherited. For more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.'
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
description: 'The Google Cloud
Storage location of the directory where the output is to be written
to. In the given directory a new directory is created. Its name is
`prediction-<model-display-name>-<job-create-time>`, where timestamp
is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files
`predictions_0001.<extension>`, `predictions_0002.<extension>`,
..., `predictions_N.<extension>` are created where `<extension>`
depends on chosen `predictions_format`, and N may equal 0001 and
depends on the total number of successfully predicted instances. If
the Model has both `instance` and `prediction` schemata defined
then each such file contains predictions as per the
`predictions_format`. If prediction for any instance failed
(partially or completely), then an additional
`errors_0001.<extension>`, `errors_0002.<extension>`,...,
`errors_N.<extension>` files are created (N depends on total number
of failed predictions). These files contain the failed instances, as
per their schema, followed by an additional `error` field which as
value has `google.rpc.Status` containing only `code` and
`message` fields. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
gcs_source_uris:
defaultValue: []
description: 'Google Cloud Storage URI(-s) to your instances to run batch
prediction
on. They must match `instances_format`. May contain wildcards. For more
information on wildcards, see [WildcardNames](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).
For more details about this input config, see [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
description: 'Generate explanation along with
the batch prediction results. This will cause the batch prediction
output to include explanations based on the `prediction_format`: -
`bigquery`: output includes a column named `explanation`. The value is
a struct that conforms to the [aiplatform.gapic.Explanation] object. -
`jsonl`: The JSON objects on each line include an additional entry
keyed `explanation`. The value of the entry is a JSON object that
conforms to the [aiplatform.gapic.Explanation] object. - `csv`:
Generating explanations for CSV format is not supported. If this
field is set to true, either the Model.explanation_spec or
explanation_metadata and explanation_parameters must be populated.'
isOptional: true
parameterType: BOOLEAN
included_fields:
defaultValue: []
description: 'Fields that will be included in the prediction instance that
is
sent to the Model.
If `instance_type` is `array`, the order of field names in
`included_fields` also determines the order of the values in the array.
When `included_fields` is populated, `excluded_fields` must be empty.
The input must be JSONL with objects at each line, CSV, BigQuery
or TfRecord.'
isOptional: true
parameterType: LIST
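      # Hypothetical example: ["feature_a", "feature_b"] forwards only these two
      # fields to the Model; with `instance_type` set to `array`, their order
      # here also fixes the order of values in each instance array.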
instance_type:
defaultValue: ''
description: "The format of the instance that the Model\naccepts. Vertex\
\ AI will convert compatible\n[InstancesFormat](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\n\
to the specified format. Supported values are:\n`object`: Each input is\
\ converted to JSON object format.\n * For `bigquery`, each row is converted\
\ to an object.\n * For `jsonl`, each line of the JSONL input must be\
\ an object.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\
\ `tf-record-gzip`.\n`array`: Each input is converted to JSON array format.\n\
\ * For `bigquery`, each row is converted to an array. The order\n \
\ of columns is determined by the BigQuery column order, unless\n \
\ [included_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\
\ is populated.\n `included_fields` must be populated for specifying\
\ field orders.\n * For `jsonl`, if each line of the JSONL input is an\
\ object,\n `included_fields` must be populated for specifying field\
\ orders.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\n\
\ `tf-record-gzip`.\nIf not specified, Vertex AI converts the batch\
\ prediction input as\nfollows:\n * For `bigquery` and `csv`, the behavior\
\ is the same as `array`. The\n order of columns is the same as defined\
\ in the file or table, unless\n included_fields is populated.\n * For\
\ `jsonl`, the prediction instance format is determined by\n each line\
\ of the input.\n * For `tf-record`/`tf-record-gzip`, each record will\
\ be converted to\n an object in the format of `{\"b64\": <value>}`,\
\ where `<value>` is\n the Base64-encoded string of the content of the\
\ record.\n * For `file-list`, each file in the list will be converted\
\ to an\n object in the format of `{\"b64\": <value>}`, where `<value>`\
\ is\n the Base64-encoded string of the content of the file."
isOptional: true
parameterType: STRING
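      # Sketch of the conversion for a hypothetical BigQuery row with columns
      # age=34 and country="US":
      #   instance_type object -> {"age": 34, "country": "US"}
      #   instance_type array  -> [34, "US"]  (column or `included_fields` order)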
instances_format:
defaultValue: jsonl
description: 'The format in which instances are
given, must be one of the [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models)''s
supportedInputStorageFormats.
For more details about this input config, see
        [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: STRING
job_display_name:
description: The user-defined name of this BatchPredictionJob.
parameterType: STRING
key_field:
defaultValue: ''
description: "The name of the field that is considered as a key.\nThe values\
\ identified by the key field is not included in the\ntransformed instances\
\ that is sent to the Model. This is similar to\nspecifying this name\
\ of the field in [excluded_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).\
\ In addition,\nthe batch prediction output will not include the instances.\
\ Instead the\noutput will only include the value of the key field, in\
\ a field named\n`key` in the output:\n * For `jsonl` output format, the\
\ output will have a `key` field\n instead of the `instance` field.\n\
      \ * For `csv`/`bigquery` output format, the output will have a `key`\n\
\ column instead of the instance feature columns.\nThe input must be\
\ JSONL with objects at each line, CSV, BigQuery\nor TfRecord."
isOptional: true
parameterType: STRING
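      # Hypothetical example: with `key_field` set to `user_id`, each output
      # record contains a `key` field holding the `user_id` value plus the
      # prediction, instead of echoing the full instance.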
labels:
defaultValue: {}
description: 'The labels with user-defined metadata to
organize your BatchPredictionJobs. Label keys and values can be no
longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for
more information and examples of labels.'
isOptional: true
parameterType: STRUCT
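      # Hypothetical example value: {"team": "forecasting", "env": "dev"}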
location:
defaultValue: us-central1
description: Location for creating the BatchPredictionJob.
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
description: 'The type of machine for running batch
prediction on dedicated resources. If the Model supports
DEDICATED_RESOURCES this config may be provided (and the job will use
these resources). If the Model doesn''t support AUTOMATIC_RESOURCES,
this config must be provided. For more details about the
BatchDedicatedResources, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources.
For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
      description: 'The number of records (e.g. instances) of the operation given
        in each batch to a machine replica. The machine type and the size of a
        single record should be considered when setting this parameter: a higher
        value speeds up the batch operation''s execution, but too high a value
        may cause a whole batch to not fit in a machine''s memory, and the whole
        operation will fail.'
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
description: 'The maximum number of machine replicas the batch operation
may be scaled
to. Only used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
      description: The parameters that govern the predictions. The schema of the
        parameters may be specified via the Model's `parameters_schema_uri`.
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
description: 'The format in which Vertex AI gives the predictions. Must
be one of the
Model''s supportedOutputStorageFormats.
For more details about this output config, see [OutputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig).'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to create the BatchPredictionJob. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
description: 'The number of machine replicas
used at the start of the batch operation. If not set, Vertex AI
decides starting number, not greater than `max_replica_count`. Only
used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
description: '[**Deprecated. Use gcs_output_directory and bigquery_output_table
instead.**] Artifact
representation of the created batch prediction job.'
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
bigquery_output_table is specified.'
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
gcs_destination_output_uri_prefix is specified.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the batch prediction
job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-model-evaluation:
executorLabel: exec-model-evaluation
inputDefinitions:
artifacts:
batch_prediction_job:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
parameters:
dataflow_disk_size:
defaultValue: 50.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-4
isOptional: true
parameterType: STRING
dataflow_max_workers_num:
defaultValue: 100.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
isOptional: true
parameterType: BOOLEAN
dataflow_workers_num:
defaultValue: 10.0
isOptional: true
parameterType: NUMBER_INTEGER
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
example_weight_column:
defaultValue: ''
isOptional: true
parameterType: STRING
ground_truth_column:
parameterType: STRING
ground_truth_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
prediction_id_column:
defaultValue: ''
isOptional: true
parameterType: STRING
prediction_label_column:
defaultValue: ''
isOptional: true
parameterType: STRING
prediction_score_column:
defaultValue: ''
isOptional: true
parameterType: STRING
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
problem_type:
parameterType: STRING
project:
parameterType: STRING
root_dir:
parameterType: STRING
outputDefinitions:
artifacts:
evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-model-evaluation-2:
executorLabel: exec-model-evaluation-2
inputDefinitions:
artifacts:
batch_prediction_job:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
parameters:
dataflow_disk_size:
defaultValue: 50.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-4
isOptional: true
parameterType: STRING
dataflow_max_workers_num:
defaultValue: 100.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
isOptional: true
parameterType: BOOLEAN
dataflow_workers_num:
defaultValue: 10.0
isOptional: true
parameterType: NUMBER_INTEGER
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
example_weight_column:
defaultValue: ''
isOptional: true
parameterType: STRING
ground_truth_column:
parameterType: STRING
ground_truth_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
prediction_id_column:
defaultValue: ''
isOptional: true
parameterType: STRING
prediction_label_column:
defaultValue: ''
isOptional: true
parameterType: STRING
prediction_score_column:
defaultValue: ''
isOptional: true
parameterType: STRING
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
problem_type:
parameterType: STRING
project:
parameterType: STRING
root_dir:
parameterType: STRING
outputDefinitions:
artifacts:
evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-model-evaluation-3:
executorLabel: exec-model-evaluation-3
inputDefinitions:
artifacts:
batch_prediction_job:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
parameters:
dataflow_disk_size:
defaultValue: 50.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-4
isOptional: true
parameterType: STRING
dataflow_max_workers_num:
defaultValue: 100.0
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
isOptional: true
parameterType: BOOLEAN
dataflow_workers_num:
defaultValue: 10.0
isOptional: true
parameterType: NUMBER_INTEGER
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
example_weight_column:
defaultValue: ''
isOptional: true
parameterType: STRING
ground_truth_column:
parameterType: STRING
ground_truth_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
prediction_id_column:
defaultValue: ''
isOptional: true
parameterType: STRING
prediction_label_column:
defaultValue: ''
isOptional: true
parameterType: STRING
prediction_score_column:
defaultValue: ''
isOptional: true
parameterType: STRING
predictions_format:
defaultValue: jsonl
isOptional: true
parameterType: STRING
problem_type:
parameterType: STRING
project:
parameterType: STRING
root_dir:
parameterType: STRING
outputDefinitions:
artifacts:
evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-model-evaluation-import:
executorLabel: exec-model-evaluation-import
inputDefinitions:
artifacts:
classification_metrics:
artifactType:
schemaTitle: google.ClassificationMetrics
schemaVersion: 0.0.1
description: 'google.ClassificationMetrics artifact generated from
the ModelEvaluationClassificationOp component.'
isOptional: true
embedding_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'The embedding metrics artifact generated from the
embedding retrieval metrics component.'
isOptional: true
explanation:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'Path for model explanation metrics generated from an evaluation
component.'
isOptional: true
feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'The feature attributions metrics artifact generated
from the feature attribution component.'
isOptional: true
forecasting_metrics:
artifactType:
schemaTitle: google.ForecastingMetrics
schemaVersion: 0.0.1
description: 'google.ForecastingMetrics artifact generated from
the ModelEvaluationForecastingOp component.'
isOptional: true
metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: Path of metrics generated from an evaluation component.
isOptional: true
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'Vertex model resource that will be the parent resource of
the
uploaded evaluation.'
question_answering_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.QuestionAnsweringMetrics.'
isOptional: true
regression_metrics:
artifactType:
schemaTitle: google.RegressionMetrics
schemaVersion: 0.0.1
        description: 'google.RegressionMetrics artifact generated from
the ModelEvaluationRegressionOp component.'
isOptional: true
summarization_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.SummarizationMetrics.'
isOptional: true
text_generation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.TextGenerationMetrics.'
isOptional: true
parameters:
dataset_path:
defaultValue: ''
isOptional: true
parameterType: STRING
dataset_paths:
defaultValue: []
isOptional: true
parameterType: LIST
dataset_type:
defaultValue: ''
isOptional: true
parameterType: STRING
display_name:
defaultValue: ''
description: The display name for the uploaded model evaluation resource.
isOptional: true
parameterType: STRING
problem_type:
description: 'The problem type of the metrics being imported to the
VertexModel. `classification`, `regression`, `forecasting`,
`text-generation`, `question-answering`, and `summarization` are the
currently supported problem types. Must be provided when `metrics` is
provided.'
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
evaluation_resource_name:
parameterType: STRING
gcp_resources:
parameterType: STRING
comp-model-evaluation-import-2:
executorLabel: exec-model-evaluation-import-2
inputDefinitions:
artifacts:
classification_metrics:
artifactType:
schemaTitle: google.ClassificationMetrics
schemaVersion: 0.0.1
description: 'google.ClassificationMetrics artifact generated from
the ModelEvaluationClassificationOp component.'
isOptional: true
embedding_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'The embedding metrics artifact generated from the
embedding retrieval metrics component.'
isOptional: true
explanation:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'Path for model explanation metrics generated from an evaluation
component.'
isOptional: true
feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'The feature attributions metrics artifact generated
from the feature attribution component.'
isOptional: true
forecasting_metrics:
artifactType:
schemaTitle: google.ForecastingMetrics
schemaVersion: 0.0.1
description: 'google.ForecastingMetrics artifact generated from
the ModelEvaluationForecastingOp component.'
isOptional: true
metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: Path of metrics generated from an evaluation component.
isOptional: true
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'Vertex model resource that will be the parent resource of
the
uploaded evaluation.'
question_answering_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.QuestionAnsweringMetrics.'
isOptional: true
regression_metrics:
artifactType:
schemaTitle: google.RegressionMetrics
schemaVersion: 0.0.1
        description: 'google.RegressionMetrics artifact generated from
the ModelEvaluationRegressionOp component.'
isOptional: true
summarization_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.SummarizationMetrics.'
isOptional: true
text_generation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.TextGenerationMetrics.'
isOptional: true
parameters:
dataset_path:
defaultValue: ''
isOptional: true
parameterType: STRING
dataset_paths:
defaultValue: []
isOptional: true
parameterType: LIST
dataset_type:
defaultValue: ''
isOptional: true
parameterType: STRING
display_name:
defaultValue: ''
description: The display name for the uploaded model evaluation resource.
isOptional: true
parameterType: STRING
problem_type:
description: 'The problem type of the metrics being imported to the
VertexModel. `classification`, `regression`, `forecasting`,
`text-generation`, `question-answering`, and `summarization` are the
currently supported problem types. Must be provided when `metrics` is
provided.'
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
evaluation_resource_name:
parameterType: STRING
gcp_resources:
parameterType: STRING
comp-model-evaluation-import-3:
executorLabel: exec-model-evaluation-import-3
inputDefinitions:
artifacts:
classification_metrics:
artifactType:
schemaTitle: google.ClassificationMetrics
schemaVersion: 0.0.1
description: 'google.ClassificationMetrics artifact generated from
the ModelEvaluationClassificationOp component.'
isOptional: true
embedding_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'The embedding metrics artifact generated from the
embedding retrieval metrics component.'
isOptional: true
explanation:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'Path for model explanation metrics generated from an evaluation
component.'
isOptional: true
feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'The feature attributions metrics artifact generated
from the feature attribution component.'
isOptional: true
forecasting_metrics:
artifactType:
schemaTitle: google.ForecastingMetrics
schemaVersion: 0.0.1
description: 'google.ForecastingMetrics artifact generated from
the ModelEvaluationForecastingOp component.'
isOptional: true
metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: Path of metrics generated from an evaluation component.
isOptional: true
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'Vertex model resource that will be the parent resource of
the
uploaded evaluation.'
question_answering_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.QuestionAnsweringMetrics.'
isOptional: true
regression_metrics:
artifactType:
schemaTitle: google.RegressionMetrics
schemaVersion: 0.0.1
        description: 'google.RegressionMetrics artifact generated from
the ModelEvaluationRegressionOp component.'
isOptional: true
summarization_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.SummarizationMetrics.'
isOptional: true
text_generation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
description: 'system.Metrics artifact generated from
the LLMEvaluationTextGenerationOp component. Subject to change to
google.TextGenerationMetrics.'
isOptional: true
parameters:
dataset_path:
defaultValue: ''
isOptional: true
parameterType: STRING
dataset_paths:
defaultValue: []
isOptional: true
parameterType: LIST
dataset_type:
defaultValue: ''
isOptional: true
parameterType: STRING
display_name:
defaultValue: ''
description: The display name for the uploaded model evaluation resource.
isOptional: true
parameterType: STRING
problem_type:
description: 'The problem type of the metrics being imported to the
VertexModel. `classification`, `regression`, `forecasting`,
`text-generation`, `question-answering`, and `summarization` are the
currently supported problem types. Must be provided when `metrics` is
provided.'
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
evaluation_resource_name:
parameterType: STRING
gcp_resources:
parameterType: STRING
comp-model-upload:
executorLabel: exec-model-upload
inputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
parent_model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
isOptional: true
parameters:
description:
defaultValue: ''
isOptional: true
parameterType: STRING
display_name:
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
explanation_metadata:
defaultValue: {}
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-model-upload-2:
executorLabel: exec-model-upload-2
inputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
parent_model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
isOptional: true
parameters:
description:
defaultValue: ''
isOptional: true
parameterType: STRING
display_name:
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
explanation_metadata:
defaultValue: {}
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-model-upload-3:
executorLabel: exec-model-upload-3
inputDefinitions:
artifacts:
explanation_metadata_artifact:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
isOptional: true
parent_model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
isOptional: true
parameters:
description:
defaultValue: ''
isOptional: true
parameterType: STRING
display_name:
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
isOptional: true
parameterType: STRING
explanation_metadata:
defaultValue: {}
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
isOptional: true
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
parameters:
gcp_resources:
parameterType: STRING
comp-read-input-uri:
executorLabel: exec-read-input-uri
inputDefinitions:
artifacts:
split_uri:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
        description: The path to the file that contains Dataset data.
outputDefinitions:
parameters:
Output:
parameterType: LIST
comp-read-input-uri-2:
executorLabel: exec-read-input-uri-2
inputDefinitions:
artifacts:
split_uri:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
        description: The path to the file that contains Dataset data.
outputDefinitions:
parameters:
Output:
parameterType: LIST
comp-set-optional-inputs:
executorLabel: exec-set-optional-inputs
inputDefinitions:
artifacts:
vertex_dataset:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The Vertex dataset when data source is Vertex dataset.
parameters:
data_source_bigquery_table_path:
description: The BigQuery table when data source is BQ.
parameterType: STRING
data_source_csv_filenames:
description: The CSV GCS path when data source is CSV.
parameterType: STRING
location:
description: The GCP region that runs the pipeline components.
parameterType: STRING
project:
description: The GCP project that runs the pipeline components.
parameterType: STRING
outputDefinitions:
parameters:
data_source_bigquery_table_path:
parameterType: STRING
data_source_csv_filenames:
parameterType: STRING
comp-string-not-empty:
executorLabel: exec-string-not-empty
inputDefinitions:
parameters:
value:
description: String value to be checked.
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-tabular-stats-and-example-gen:
executorLabel: exec-tabular-stats-and-example-gen
inputDefinitions:
parameters:
additional_experiments:
defaultValue: ''
isOptional: true
parameterType: STRING
additional_experiments_json:
defaultValue: {}
isOptional: true
parameterType: STRUCT
data_source_bigquery_table_path:
defaultValue: ''
isOptional: true
parameterType: STRING
data_source_csv_filenames:
defaultValue: ''
isOptional: true
parameterType: STRING
dataflow_disk_size_gb:
defaultValue: 40.0
description: The disk size, in gigabytes, to use on each Dataflow worker
instance. If not set, default to 40.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-16
description: The machine type used for dataflow jobs. If not set, default
to n1-standard-16.
isOptional: true
parameterType: STRING
dataflow_max_num_workers:
defaultValue: 25.0
description: The number of workers to run the dataflow job. If not set,
default to 25.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
description: Custom service account to run dataflow jobs.
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
        description: 'Dataflow''s fully qualified subnetwork name; when empty, the
          default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications'
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: Specifies whether Dataflow workers use public IP addresses.
isOptional: true
parameterType: BOOLEAN
enable_probabilistic_inference:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
location:
description: Location for running dataset statistics and example generation.
parameterType: STRING
optimization_objective:
defaultValue: ''
description: 'Objective function the model is optimizing towards. The training
process creates a model that maximizes/minimizes the value of the objective
function over the validation set. The supported optimization objectives
depend on the prediction type. If the field is not set, a default objective
function is used. classification: "maximize-au-roc" (default) - Maximize
the area under the receiver operating characteristic (ROC) curve. "minimize-log-loss"
- Minimize log loss. "maximize-au-prc" - Maximize the area under the precision-recall
curve. "maximize-precision-at-recall" - Maximize precision for a specified
recall value. "maximize-recall-at-precision" - Maximize recall for a specified
precision value. classification (multi-class): "minimize-log-loss" (default)
- Minimize log loss. regression: "minimize-rmse" (default) - Minimize
root-mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute
error (MAE). "minimize-rmsle" - Minimize root-mean-squared log error
(RMSLE).'
isOptional: true
parameterType: STRING
optimization_objective_precision_value:
defaultValue: -1.0
description: Required when optimization_objective is "maximize-recall-at-precision".
Must be between 0 and 1, inclusive.
isOptional: true
parameterType: NUMBER_DOUBLE
optimization_objective_recall_value:
defaultValue: -1.0
description: Required when optimization_objective is "maximize-precision-at-recall".
Must be between 0 and 1, inclusive.
isOptional: true
parameterType: NUMBER_DOUBLE
predefined_split_key:
defaultValue: ''
isOptional: true
parameterType: STRING
prediction_type:
description: 'The prediction type. Supported values: "classification", "regression".'
parameterType: STRING
project:
description: Project to run dataset statistics and example generation.
parameterType: STRING
quantiles:
defaultValue: []
isOptional: true
parameterType: LIST
request_type:
defaultValue: COLUMN_STATS_ONLY
isOptional: true
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
run_distillation:
defaultValue: false
description: True if in distillation mode. The default value is false.
isOptional: true
parameterType: BOOLEAN
stratified_split_key:
defaultValue: ''
isOptional: true
parameterType: STRING
target_column_name:
description: The target column name.
parameterType: STRING
test_fraction:
defaultValue: -1.0
isOptional: true
parameterType: NUMBER_DOUBLE
timestamp_split_key:
defaultValue: ''
isOptional: true
parameterType: STRING
training_fraction:
defaultValue: -1.0
isOptional: true
parameterType: NUMBER_DOUBLE
transformations:
        description: Quote-escaped JSON string for transformations. Each transformation
          applies a transform function to the given input column, and the result is
          used for training. When creating a transformation for a BigQuery Struct
          column, the column should be flattened using "." as the delimiter.
parameterType: STRING
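      # A minimal sketch of such a quote-escaped JSON string, using hypothetical
      # column names (the exact transformation keys follow the AutoML Tables
      # transformation schema):
      #   '[{"auto": {"column_name": "age"}}, {"categorical": {"column_name": "country"}}]'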
transformations_path:
defaultValue: ''
description: Path to a GCS file containing JSON string for transformations.
isOptional: true
parameterType: STRING
validation_fraction:
defaultValue: -1.0
isOptional: true
parameterType: NUMBER_DOUBLE
weight_column_name:
defaultValue: ''
description: The weight column name.
isOptional: true
parameterType: STRING
outputDefinitions:
artifacts:
dataset_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The schema of the dataset.
dataset_stats:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The stats of the dataset.
eval_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The eval split.
instance_baseline:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The instance baseline used to calculate explanations.
metadata:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The tabular example gen metadata.
test_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The test split.
train_split:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The train split.
parameters:
downsampled_test_split_json:
description: The downsampled test split JSON object.
parameterType: LIST
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
test_split_json:
description: The test split JSON object.
parameterType: LIST
comp-write-bp-result-path:
executorLabel: exec-write-bp-result-path
inputDefinitions:
artifacts:
bp_job:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The batch prediction job artifact.
outputDefinitions:
artifacts:
result:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
comp-write-bp-result-path-2:
executorLabel: exec-write-bp-result-path-2
inputDefinitions:
artifacts:
bp_job:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The batch prediction job artifact.
outputDefinitions:
artifacts:
result:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
deploymentSpec:
executors:
exec-automl-tabular-cv-trainer:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-cv-tuner-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"l2l_cv_tuner\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
"\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625",
"\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\",
\"--num_parallel_trial=", "{{$.inputs.parameters[''num_parallel_trials'']}}",
"\", \"--single_run_max_secs=", "{{$.inputs.parameters[''single_run_max_secs'']}}",
"\", \"--deadline_hours=", "{{$.inputs.parameters[''deadline_hours'']}}",
"\", \"--valid_trials_completed_threshold=0.7\", \"--num_selected_trials=",
"{{$.inputs.parameters[''num_selected_trials'']}}", "\", \"--num_selected_features=",
"{{$.inputs.parameters[''num_selected_features'']}}", "\", \"--lro_job_info=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\",
\"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\", \"--materialized_cv_splits=",
"{{$.inputs.artifacts[''materialized_cv_splits''].uri}}", "\", \"--tuning_result_input_path=",
"{{$.inputs.artifacts[''tuning_result_input''].uri}}", "\", \"--tuning_result_output_path=",
"{{$.outputs.artifacts[''tuning_result_output''].uri}}", "\", \"--kms_key_name=",
"{{$.inputs.parameters[''encryption_spec_key_name'']}}", "\", \"--gcp_resources_path=",
"{{$.outputs.parameters[''gcp_resources''].output_file}}", "\", \"--execution_metrics_path=",
"{{$.outputs.parameters[''execution_metrics''].output_file}}", "\", \"--use_custom_job=true\",
\"--use_json=true\", \"--log_level=ERROR\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-cv-trainer-2:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-cv-tuner-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"l2l_cv_tuner\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
"\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625",
"\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\",
\"--num_parallel_trial=", "{{$.inputs.parameters[''num_parallel_trials'']}}",
"\", \"--single_run_max_secs=", "{{$.inputs.parameters[''single_run_max_secs'']}}",
"\", \"--deadline_hours=", "{{$.inputs.parameters[''deadline_hours'']}}",
"\", \"--valid_trials_completed_threshold=0.7\", \"--num_selected_trials=",
"{{$.inputs.parameters[''num_selected_trials'']}}", "\", \"--num_selected_features=",
"{{$.inputs.parameters[''num_selected_features'']}}", "\", \"--lro_job_info=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\",
\"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\", \"--materialized_cv_splits=",
"{{$.inputs.artifacts[''materialized_cv_splits''].uri}}", "\", \"--tuning_result_input_path=",
"{{$.inputs.artifacts[''tuning_result_input''].uri}}", "\", \"--tuning_result_output_path=",
"{{$.outputs.artifacts[''tuning_result_output''].uri}}", "\", \"--kms_key_name=",
"{{$.inputs.parameters[''encryption_spec_key_name'']}}", "\", \"--gcp_resources_path=",
"{{$.outputs.parameters[''gcp_resources''].output_file}}", "\", \"--execution_metrics_path=",
"{{$.outputs.parameters[''execution_metrics''].output_file}}", "\", \"--use_custom_job=true\",
\"--use_json=true\", \"--log_level=ERROR\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-ensemble:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-ensemble-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-highmem-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"ensemble\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
"\", \"--model_output_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/model\",
\"--custom_model_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/custom_model\", \"--error_file_path=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--export_custom_model=", "{{$.inputs.parameters[''export_additional_model_without_custom_ops'']}}",
"\", \"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\",
\"--dataset_schema_path=", "{{$.inputs.artifacts[''dataset_schema''].uri}}",
"\", \"--tuning_result_input_path=", "{{$.inputs.artifacts[''tuning_result_input''].uri}}",
"\", \"--instance_baseline_path=", "{{$.inputs.artifacts[''instance_baseline''].uri}}",
"\", \"--warmup_data=", "{{$.inputs.artifacts[''warmup_data''].uri}}", "\",
\"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240808_0625",
"\", \"--model_path=", "{{$.outputs.artifacts[''model''].uri}}", "\", \"--custom_model_path=",
"{{$.outputs.artifacts[''model_without_custom_ops''].uri}}", "\", \"--explanation_metadata_path=",
"{{$.outputs.parameters[''explanation_metadata''].output_file}}", ",", "{{$.outputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\", \"--explanation_parameters_path=", "{{$.outputs.parameters[''explanation_parameters''].output_file}}",
"\", \"--model_architecture_path=", "{{$.outputs.artifacts[''model_architecture''].uri}}",
"\", \"--use_json=true\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-ensemble-2:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-ensemble-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-highmem-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"ensemble\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
"\", \"--model_output_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/model\",
\"--custom_model_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/custom_model\", \"--error_file_path=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--export_custom_model=", "{{$.inputs.parameters[''export_additional_model_without_custom_ops'']}}",
"\", \"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\",
\"--dataset_schema_path=", "{{$.inputs.artifacts[''dataset_schema''].uri}}",
"\", \"--tuning_result_input_path=", "{{$.inputs.artifacts[''tuning_result_input''].uri}}",
"\", \"--instance_baseline_path=", "{{$.inputs.artifacts[''instance_baseline''].uri}}",
"\", \"--warmup_data=", "{{$.inputs.artifacts[''warmup_data''].uri}}", "\",
\"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240808_0625",
"\", \"--model_path=", "{{$.outputs.artifacts[''model''].uri}}", "\", \"--custom_model_path=",
"{{$.outputs.artifacts[''model_without_custom_ops''].uri}}", "\", \"--explanation_metadata_path=",
"{{$.outputs.parameters[''explanation_metadata''].output_file}}", ",", "{{$.outputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\", \"--explanation_parameters_path=", "{{$.outputs.parameters[''explanation_parameters''].output_file}}",
"\", \"--model_architecture_path=", "{{$.outputs.artifacts[''model_architecture''].uri}}",
"\", \"--use_json=true\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-ensemble-3:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-ensemble-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-highmem-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"ensemble\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
"\", \"--model_output_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/model\",
\"--custom_model_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/custom_model\", \"--error_file_path=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--export_custom_model=", "{{$.inputs.parameters[''export_additional_model_without_custom_ops'']}}",
"\", \"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\",
\"--dataset_schema_path=", "{{$.inputs.artifacts[''dataset_schema''].uri}}",
"\", \"--tuning_result_input_path=", "{{$.inputs.artifacts[''tuning_result_input''].uri}}",
"\", \"--instance_baseline_path=", "{{$.inputs.artifacts[''instance_baseline''].uri}}",
"\", \"--warmup_data=", "{{$.inputs.artifacts[''warmup_data''].uri}}", "\",
\"--prediction_docker_uri=", "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240808_0625",
"\", \"--model_path=", "{{$.outputs.artifacts[''model''].uri}}", "\", \"--custom_model_path=",
"{{$.outputs.artifacts[''model_without_custom_ops''].uri}}", "\", \"--explanation_metadata_path=",
"{{$.outputs.parameters[''explanation_metadata''].output_file}}", ",", "{{$.outputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\", \"--explanation_parameters_path=", "{{$.outputs.parameters[''explanation_parameters''].output_file}}",
"\", \"--model_architecture_path=", "{{$.outputs.artifacts[''model_architecture''].uri}}",
"\", \"--use_json=true\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-finalizer:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-finalizer-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"cancel_l2l_tuner\", \"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--cleanup_lro_job_infos=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-infra-validator:
container:
args:
- --executor_input
- '{{$}}'
image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240808_0625
resources:
cpuLimit: 8.0
memoryLimit: 52.0
exec-automl-tabular-infra-validator-2:
container:
args:
- --executor_input
- '{{$}}'
image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240808_0625
resources:
cpuLimit: 8.0
memoryLimit: 52.0
exec-automl-tabular-infra-validator-3:
container:
args:
- --executor_input
- '{{$}}'
image: us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240808_0625
resources:
cpuLimit: 8.0
memoryLimit: 52.0
exec-automl-tabular-stage-1-tuner:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-stage-1-tuner-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"l2l_stage_1_tuner\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
"\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625",
"\", \"--feature_selection_result_path=", "{{$.inputs.artifacts[''feature_ranking''].uri}}",
"\", \"--disable_early_stopping=", "{{$.inputs.parameters[''disable_early_stopping'']}}",
"\", \"--tune_feature_selection_rate=", "{{$.inputs.parameters[''tune_feature_selection_rate'']}}",
"\", \"--reduce_search_space_mode=", "{{$.inputs.parameters[''reduce_search_space_mode'']}}",
"\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\",
\"--num_parallel_trial=", "{{$.inputs.parameters[''num_parallel_trials'']}}",
"\", \"--single_run_max_secs=", "{{$.inputs.parameters[''single_run_max_secs'']}}",
"\", \"--deadline_hours=", "{{$.inputs.parameters[''deadline_hours'']}}",
"\", \"--num_selected_trials=", "{{$.inputs.parameters[''num_selected_trials'']}}",
"\", \"--num_selected_features=", "{{$.inputs.parameters[''num_selected_features'']}}",
"\", \"--lro_job_info=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\",
\"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\", \"--materialized_train_split=",
"{{$.inputs.artifacts[''materialized_train_split''].uri}}", "\", \"--materialized_eval_split=",
"{{$.inputs.artifacts[''materialized_eval_split''].uri}}", "\", \"--is_distill=",
"{{$.inputs.parameters[''run_distillation'']}}", "\", \"--tuning_result_output_path=",
"{{$.outputs.artifacts[''tuning_result_output''].uri}}", "\", \"--kms_key_name=",
"{{$.inputs.parameters[''encryption_spec_key_name'']}}", "\", \"--gcp_resources_path=",
"{{$.outputs.parameters[''gcp_resources''].output_file}}", "\", \"--execution_metrics_path=",
"{{$.outputs.parameters[''execution_metrics''].output_file}}", "\", \"--use_json=true\",
\"--log_level=ERROR\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-stage-1-tuner-2:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-stage-1-tuner-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"l2l_stage_1_tuner\", \"--transform_output_path=", "{{$.inputs.artifacts[''transform_output''].uri}}",
"\", \"--training_docker_uri=", "us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625",
"\", \"--feature_selection_result_path=", "{{$.inputs.artifacts[''feature_ranking''].uri}}",
"\", \"--disable_early_stopping=", "{{$.inputs.parameters[''disable_early_stopping'']}}",
"\", \"--tune_feature_selection_rate=", "{{$.inputs.parameters[''tune_feature_selection_rate'']}}",
"\", \"--reduce_search_space_mode=", "{{$.inputs.parameters[''reduce_search_space_mode'']}}",
"\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\",
\"--num_parallel_trial=", "{{$.inputs.parameters[''num_parallel_trials'']}}",
"\", \"--single_run_max_secs=", "{{$.inputs.parameters[''single_run_max_secs'']}}",
"\", \"--deadline_hours=", "{{$.inputs.parameters[''deadline_hours'']}}",
"\", \"--num_selected_trials=", "{{$.inputs.parameters[''num_selected_trials'']}}",
"\", \"--num_selected_features=", "{{$.inputs.parameters[''num_selected_features'']}}",
"\", \"--lro_job_info=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\",
\"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\", \"--materialized_train_split=",
"{{$.inputs.artifacts[''materialized_train_split''].uri}}", "\", \"--materialized_eval_split=",
"{{$.inputs.artifacts[''materialized_eval_split''].uri}}", "\", \"--is_distill=",
"{{$.inputs.parameters[''run_distillation'']}}", "\", \"--tuning_result_output_path=",
"{{$.outputs.artifacts[''tuning_result_output''].uri}}", "\", \"--kms_key_name=",
"{{$.inputs.parameters[''encryption_spec_key_name'']}}", "\", \"--gcp_resources_path=",
"{{$.outputs.parameters[''gcp_resources''].output_file}}", "\", \"--execution_metrics_path=",
"{{$.outputs.parameters[''execution_metrics''].output_file}}", "\", \"--use_json=true\",
\"--log_level=ERROR\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-transform:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-transform-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"transform\", \"--is_mp=true\", \"--transform_output_artifact_path=",
"{{$.outputs.artifacts[''transform_output''].uri}}", "\", \"--transform_output_path=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform\",
\"--materialized_splits_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform_materialized\",
\"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\", \"--dataset_schema_path=",
"{{$.inputs.artifacts[''dataset_schema''].uri}}", "\", \"--train_split=",
"{{$.inputs.artifacts[''train_split''].uri}}", "\", \"--eval_split=", "{{$.inputs.artifacts[''eval_split''].uri}}",
"\", \"--test_split=", "{{$.inputs.artifacts[''test_split''].uri}}", "\",
\"--materialized_train_split=", "{{$.outputs.artifacts[''materialized_train_split''].uri}}",
"\", \"--materialized_eval_split=", "{{$.outputs.artifacts[''materialized_eval_split''].uri}}",
"\", \"--materialized_test_split=", "{{$.outputs.artifacts[''materialized_test_split''].uri}}",
"\", \"--training_schema_path=", "{{$.outputs.artifacts[''training_schema_uri''].uri}}",
"\", \"--job_name=automl-tabular-transform-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"\", \"--dataflow_project=", "{{$.inputs.parameters[''project'']}}", "\",
\"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--dataflow_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging\",
\"--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\",
\"--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}",
"\", \"--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}",
"\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625",
"\", \"--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}",
"\", \"--dataflow_subnetwork_fully_qualified=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}",
"\", \"--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}",
"\", \"--dataflow_kms_key=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\", \"--dataflow_service_account=", "{{$.inputs.parameters[''dataflow_service_account'']}}",
"\", \"--lro_job_info=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\",
\"--gcp_resources_path=", "{{$.outputs.parameters[''gcp_resources''].output_file}}",
"\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-automl-tabular-transform-2:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"automl-tabular-transform-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"transform\", \"--is_mp=true\", \"--transform_output_artifact_path=",
"{{$.outputs.artifacts[''transform_output''].uri}}", "\", \"--transform_output_path=",
"{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform\",
\"--materialized_splits_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform_materialized\",
\"--metadata_path=", "{{$.inputs.artifacts[''metadata''].uri}}", "\", \"--dataset_schema_path=",
"{{$.inputs.artifacts[''dataset_schema''].uri}}", "\", \"--train_split=",
"{{$.inputs.artifacts[''train_split''].uri}}", "\", \"--eval_split=", "{{$.inputs.artifacts[''eval_split''].uri}}",
"\", \"--test_split=", "{{$.inputs.artifacts[''test_split''].uri}}", "\",
\"--materialized_train_split=", "{{$.outputs.artifacts[''materialized_train_split''].uri}}",
"\", \"--materialized_eval_split=", "{{$.outputs.artifacts[''materialized_eval_split''].uri}}",
"\", \"--materialized_test_split=", "{{$.outputs.artifacts[''materialized_test_split''].uri}}",
"\", \"--training_schema_path=", "{{$.outputs.artifacts[''training_schema_uri''].uri}}",
"\", \"--job_name=automl-tabular-transform-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"\", \"--dataflow_project=", "{{$.inputs.parameters[''project'']}}", "\",
\"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--dataflow_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging\",
\"--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\",
\"--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}",
"\", \"--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}",
"\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625",
"\", \"--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}",
"\", \"--dataflow_subnetwork_fully_qualified=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}",
"\", \"--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}",
"\", \"--dataflow_kms_key=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\", \"--dataflow_service_account=", "{{$.inputs.parameters[''dataflow_service_account'']}}",
"\", \"--lro_job_info=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\",
\"--gcp_resources_path=", "{{$.outputs.parameters[''gcp_resources''].output_file}}",
"\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
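    # The exec-bool-identity executors are lightweight KFP Python components: the
    # function source is embedded as a string, written to a temporary module by the
    # sh wrapper, and executed through kfp.components.executor_main. The numbered
    # copies (-2, -3) are the per-use executor entries the compiler emits when the
    # same component appears more than once in the pipeline DAG.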
exec-bool-identity:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _bool_identity
command:
- sh
- -ec
        - 'program_path=$(mktemp -d)

          printf "%s" "$0" > "$program_path/ephemeral_component.py"

          python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"

          '
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _bool_identity(value: bool) -> str:\n \"\"\"Returns boolean\
\ value.\n\n Args:\n value: Boolean value to return\n\n Returns:\n\
\ Boolean value.\n \"\"\"\n return 'true' if value else 'false'\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bool-identity-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _bool_identity
command:
- sh
- -ec
        - 'program_path=$(mktemp -d)

          printf "%s" "$0" > "$program_path/ephemeral_component.py"

          python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"

          '
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _bool_identity(value: bool) -> str:\n \"\"\"Returns boolean\
\ value.\n\n Args:\n value: Boolean value to return\n\n Returns:\n\
\ Boolean value.\n \"\"\"\n return 'true' if value else 'false'\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bool-identity-3:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _bool_identity
command:
- sh
- -ec
        - 'program_path=$(mktemp -d)

          printf "%s" "$0" > "$program_path/ephemeral_component.py"

          python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"

          '
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _bool_identity(value: bool) -> str:\n \"\"\"Returns boolean\
\ value.\n\n Args:\n value: Boolean value to return\n\n Returns:\n\
\ Boolean value.\n \"\"\"\n return 'true' if value else 'false'\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
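    # _calculate_training_parameters (two identical copies below) splits
    # train_budget_milli_node_hours between the stage 1 architecture search and the
    # stage 2 tuning phase, derives per-trial time limits and a distillation
    # deadline, and picks the reduce_search_space_mode passed to the stage 1 tuner.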
exec-calculate-training-parameters:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _calculate_training_parameters
command:
- sh
- -ec
        - 'program_path=$(mktemp -d)

          printf "%s" "$0" > "$program_path/ephemeral_component.py"

          python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"

          '
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _calculate_training_parameters(\n stage_1_num_parallel_trials:\
\ int,\n train_budget_milli_node_hours: float,\n stage_2_num_parallel_trials:\
\ int,\n run_distillation: bool,\n is_skip_architecture_search: bool\
\ = False,\n fast_testing: bool = False,\n) -> NamedTuple(\n 'Outputs',\n\
\ [\n ('stage_1_deadline_hours', float),\n ('stage_1_num_selected_trials',\
\ int),\n ('stage_1_single_run_max_secs', int),\n ('stage_2_deadline_hours',\
\ float),\n ('stage_2_single_run_max_secs', int),\n ('distill_stage_1_deadline_hours',\
\ float),\n ('reduce_search_space_mode', str),\n ],\n):\n \"\"\
\"Calculates training parameters.\n\n Args:\n stage_1_num_parallel_trials:\
          \ Number of parallel trials for stage 1.\n    train_budget_milli_node_hours:\
\ The train budget of creating this model,\n expressed in milli node\
          \ hours, i.e. a value of 1,000 in this field means 1 node\n      hour.\n    stage_2_num_parallel_trials:\
          \ Number of parallel trials for stage 2.\n    run_distillation: Whether\
\ to run distill in the training pipeline.\n is_skip_architecture_search:\
\ If component is being called in the\n skip_architecture_search pipeline.\n\
\ fast_testing: Internal flag used for presubmit tests.\n\n Returns:\n\
\ stage_1_deadline_hours: Maximum number of hours to run stage 1.\n\
          \ stage_1_num_selected_trials: Number of selected trials for stage\
          \ 1.\n    stage_1_single_run_max_secs: Maximum number of seconds for a\
\ single stage\n 1\n training trial.\n stage_2_deadline_hours:\
\ Maximum number of hours to run stage 2.\n stage_2_single_run_max_secs:\
          \ Maximum number of seconds for a single stage\n      2\n      training\
\ trial.\n distill_stage_1_deadline_hours: Maximum number of hours\
\ to run stage 1 for\n the model distillation.\n reduce_search_space_mode:\
\ The reduce search space mode. Possible values:\n minimal, regular,\
\ full.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name\n\
\ import collections\n import math\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name\n\
\ num_folds = 5\n distill_total_trials = 100\n\n stage_1_deadline_hours\
\ = -1.0\n stage_1_num_selected_trials = -1\n stage_1_single_run_max_secs\
\ = -1\n stage_2_deadline_hours = -1.0\n stage_2_single_run_max_secs =\
\ -1\n distill_stage_1_deadline_hours = 1.0\n reduce_search_space_mode\
\ = 'regular'\n\n if is_skip_architecture_search:\n stage_2_deadline_hours\
\ = train_budget_milli_node_hours / 1000.0\n stage_2_single_run_max_secs\
\ = int(stage_2_deadline_hours * 3600.0 / 1.3)\n else:\n hours = float(train_budget_milli_node_hours)\
\ / 1000.0\n multiplier = stage_1_num_parallel_trials * hours / 500.0\n\
\ stage_1_single_run_max_secs = int(math.sqrt(multiplier) * 2400.0)\n\
\ phase_2_rounds = int(\n math.sqrt(multiplier) * 100 / stage_2_num_parallel_trials\
\ + 0.5\n )\n if phase_2_rounds < 1:\n phase_2_rounds = 1\n\n\
\ # All of magic number \"1.3\" above is because the trial doesn't\n\
\ # always finish in time_per_trial. 1.3 is an empirical safety margin\
\ here.\n stage_1_deadline_secs = int(\n hours * 3600.0 - 1.3\
\ * stage_1_single_run_max_secs * phase_2_rounds\n )\n\n if stage_1_deadline_secs\
\ < hours * 3600.0 * 0.5:\n stage_1_deadline_secs = int(hours * 3600.0\
\ * 0.5)\n # Phase 1 deadline is the same as phase 2 deadline in this\
\ case. Phase 2\n # can't finish in time after the deadline is cut,\
\ so adjust the time per\n # trial to meet the deadline.\n stage_1_single_run_max_secs\
\ = int(\n stage_1_deadline_secs / (1.3 * phase_2_rounds)\n \
\ )\n\n reduce_search_space_mode = 'minimal'\n if multiplier > 2:\n\
\ reduce_search_space_mode = 'regular'\n if multiplier > 4:\n \
\ reduce_search_space_mode = 'full'\n\n # Stage 2 number of trials\
\ is stage_1_num_selected_trials *\n # num_folds, which should be equal\
\ to phase_2_rounds *\n # stage_2_num_parallel_trials. Use this information\
\ to calculate\n # stage_1_num_selected_trials:\n stage_1_num_selected_trials\
\ = int(\n phase_2_rounds * stage_2_num_parallel_trials / num_folds\n\
\ )\n stage_1_deadline_hours = stage_1_deadline_secs / 3600.0\n\n\
\ stage_2_deadline_hours = hours - stage_1_deadline_hours\n stage_2_single_run_max_secs\
\ = stage_1_single_run_max_secs\n\n if run_distillation:\n # All\
\ of magic number \"1.3\" above is because the trial doesn't always\n \
\ # finish in time_per_trial. 1.3 is an empirical safety margin here.\n\
\ distill_stage_1_deadline_hours = (\n math.ceil(float(distill_total_trials)\
\ / stage_1_num_parallel_trials)\n * stage_1_single_run_max_secs\n\
\ * 1.3\n / 3600.0\n )\n\n if fast_testing:\n \
\ distill_stage_1_deadline_hours = 0.2\n stage_1_deadline_hours = 0.2\n\
\ stage_1_single_run_max_secs = 1\n stage_2_deadline_hours = 0.2\n\
\ stage_2_single_run_max_secs = 1\n\n return collections.namedtuple(\n\
\ 'Outputs',\n [\n 'stage_1_deadline_hours',\n \
\ 'stage_1_num_selected_trials',\n 'stage_1_single_run_max_secs',\n\
\ 'stage_2_deadline_hours',\n 'stage_2_single_run_max_secs',\n\
\ 'distill_stage_1_deadline_hours',\n 'reduce_search_space_mode',\n\
\ ],\n )(\n stage_1_deadline_hours,\n stage_1_num_selected_trials,\n\
\ stage_1_single_run_max_secs,\n stage_2_deadline_hours,\n \
\ stage_2_single_run_max_secs,\n distill_stage_1_deadline_hours,\n\
\ reduce_search_space_mode,\n )\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-calculate-training-parameters-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _calculate_training_parameters
command:
- sh
- -ec
        - 'program_path=$(mktemp -d)

          printf "%s" "$0" > "$program_path/ephemeral_component.py"

          python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"

          '
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _calculate_training_parameters(\n stage_1_num_parallel_trials:\
\ int,\n train_budget_milli_node_hours: float,\n stage_2_num_parallel_trials:\
\ int,\n run_distillation: bool,\n is_skip_architecture_search: bool\
\ = False,\n fast_testing: bool = False,\n) -> NamedTuple(\n 'Outputs',\n\
\ [\n ('stage_1_deadline_hours', float),\n ('stage_1_num_selected_trials',\
\ int),\n ('stage_1_single_run_max_secs', int),\n ('stage_2_deadline_hours',\
\ float),\n ('stage_2_single_run_max_secs', int),\n ('distill_stage_1_deadline_hours',\
\ float),\n ('reduce_search_space_mode', str),\n ],\n):\n \"\"\
\"Calculates training parameters.\n\n Args:\n stage_1_num_parallel_trials:\
          \ Number of parallel trials for stage 1.\n    train_budget_milli_node_hours:\
\ The train budget of creating this model,\n expressed in milli node\
          \ hours, i.e. a value of 1,000 in this field means 1 node\n      hour.\n    stage_2_num_parallel_trials:\
          \ Number of parallel trials for stage 2.\n    run_distillation: Whether\
\ to run distill in the training pipeline.\n is_skip_architecture_search:\
\ If component is being called in the\n skip_architecture_search pipeline.\n\
\ fast_testing: Internal flag used for presubmit tests.\n\n Returns:\n\
\ stage_1_deadline_hours: Maximum number of hours to run stage 1.\n\
          \ stage_1_num_selected_trials: Number of selected trials for stage\
          \ 1.\n    stage_1_single_run_max_secs: Maximum number of seconds for a\
\ single stage\n 1\n training trial.\n stage_2_deadline_hours:\
\ Maximum number of hours to run stage 2.\n stage_2_single_run_max_secs:\
          \ Maximum number of seconds for a single stage\n      2\n      training\
\ trial.\n distill_stage_1_deadline_hours: Maximum number of hours\
\ to run stage 1 for\n the model distillation.\n reduce_search_space_mode:\
\ The reduce search space mode. Possible values:\n minimal, regular,\
\ full.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name\n\
\ import collections\n import math\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name\n\
\ num_folds = 5\n distill_total_trials = 100\n\n stage_1_deadline_hours\
\ = -1.0\n stage_1_num_selected_trials = -1\n stage_1_single_run_max_secs\
\ = -1\n stage_2_deadline_hours = -1.0\n stage_2_single_run_max_secs =\
\ -1\n distill_stage_1_deadline_hours = 1.0\n reduce_search_space_mode\
\ = 'regular'\n\n if is_skip_architecture_search:\n stage_2_deadline_hours\
\ = train_budget_milli_node_hours / 1000.0\n stage_2_single_run_max_secs\
\ = int(stage_2_deadline_hours * 3600.0 / 1.3)\n else:\n hours = float(train_budget_milli_node_hours)\
\ / 1000.0\n multiplier = stage_1_num_parallel_trials * hours / 500.0\n\
\ stage_1_single_run_max_secs = int(math.sqrt(multiplier) * 2400.0)\n\
\ phase_2_rounds = int(\n math.sqrt(multiplier) * 100 / stage_2_num_parallel_trials\
\ + 0.5\n )\n if phase_2_rounds < 1:\n phase_2_rounds = 1\n\n\
\ # All of magic number \"1.3\" above is because the trial doesn't\n\
\ # always finish in time_per_trial. 1.3 is an empirical safety margin\
\ here.\n stage_1_deadline_secs = int(\n hours * 3600.0 - 1.3\
\ * stage_1_single_run_max_secs * phase_2_rounds\n )\n\n if stage_1_deadline_secs\
\ < hours * 3600.0 * 0.5:\n stage_1_deadline_secs = int(hours * 3600.0\
\ * 0.5)\n # Phase 1 deadline is the same as phase 2 deadline in this\
\ case. Phase 2\n # can't finish in time after the deadline is cut,\
\ so adjust the time per\n # trial to meet the deadline.\n stage_1_single_run_max_secs\
\ = int(\n stage_1_deadline_secs / (1.3 * phase_2_rounds)\n \
\ )\n\n reduce_search_space_mode = 'minimal'\n if multiplier > 2:\n\
\ reduce_search_space_mode = 'regular'\n if multiplier > 4:\n \
\ reduce_search_space_mode = 'full'\n\n # Stage 2 number of trials\
\ is stage_1_num_selected_trials *\n # num_folds, which should be equal\
\ to phase_2_rounds *\n # stage_2_num_parallel_trials. Use this information\
\ to calculate\n # stage_1_num_selected_trials:\n stage_1_num_selected_trials\
\ = int(\n phase_2_rounds * stage_2_num_parallel_trials / num_folds\n\
\ )\n stage_1_deadline_hours = stage_1_deadline_secs / 3600.0\n\n\
\ stage_2_deadline_hours = hours - stage_1_deadline_hours\n stage_2_single_run_max_secs\
\ = stage_1_single_run_max_secs\n\n if run_distillation:\n # All\
\ of magic number \"1.3\" above is because the trial doesn't always\n \
\ # finish in time_per_trial. 1.3 is an empirical safety margin here.\n\
\ distill_stage_1_deadline_hours = (\n math.ceil(float(distill_total_trials)\
\ / stage_1_num_parallel_trials)\n * stage_1_single_run_max_secs\n\
\ * 1.3\n / 3600.0\n )\n\n if fast_testing:\n \
\ distill_stage_1_deadline_hours = 0.2\n stage_1_deadline_hours = 0.2\n\
\ stage_1_single_run_max_secs = 1\n stage_2_deadline_hours = 0.2\n\
\ stage_2_single_run_max_secs = 1\n\n return collections.namedtuple(\n\
\ 'Outputs',\n [\n 'stage_1_deadline_hours',\n \
\ 'stage_1_num_selected_trials',\n 'stage_1_single_run_max_secs',\n\
\ 'stage_2_deadline_hours',\n 'stage_2_single_run_max_secs',\n\
\ 'distill_stage_1_deadline_hours',\n 'reduce_search_space_mode',\n\
\ ],\n )(\n stage_1_deadline_hours,\n stage_1_num_selected_trials,\n\
\ stage_1_single_run_max_secs,\n stage_2_deadline_hours,\n \
\ stage_2_single_run_max_secs,\n distill_stage_1_deadline_hours,\n\
\ reduce_search_space_mode,\n )\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
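    # The exec-feature-attribution executors run the model-evaluation image on
    # Dataflow to compute feature attributions from batch-prediction output; the
    # '{"IfPresent": ...}' placeholders select the GCS or BigQuery prediction source
    # depending on which input is supplied.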
exec-feature-attribution:
container:
args:
- --task
- explanation
- --setup_file
- /setup.py
- --project_id
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --problem_type
- '{{$.inputs.parameters[''problem_type'']}}'
- --root_dir
- '{{$.pipeline_root}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}'
- --batch_prediction_format
- '{{$.inputs.parameters[''predictions_format'']}}'
- '{"IfPresent": {"InputName": "predictions_gcs_source", "Then": ["--batch_prediction_gcs_source",
"{{$.inputs.artifacts[''predictions_gcs_source''].uri}}"]}}'
- '{"IfPresent": {"InputName": "predictions_bigquery_source", "Then": ["--batch_prediction_bigquery_source",
{"Concat": ["bq://", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''projectId'']}}",
".", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''datasetId'']}}",
".", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''tableId'']}}"]}]}}'
- --dataflow_job_prefix
        - evaluation-feature-attribution-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- --dataflow_service_account
- '{{$.inputs.parameters[''dataflow_service_account'']}}'
- --dataflow_disk_size
- '{{$.inputs.parameters[''dataflow_disk_size_gb'']}}'
- --dataflow_machine_type
- '{{$.inputs.parameters[''dataflow_machine_type'']}}'
- --dataflow_workers_num
- '{{$.inputs.parameters[''dataflow_workers_num'']}}'
- --dataflow_max_workers_num
- '{{$.inputs.parameters[''dataflow_max_workers_num'']}}'
- --dataflow_subnetwork
- '{{$.inputs.parameters[''dataflow_subnetwork'']}}'
- --dataflow_use_public_ips
- '{{$.inputs.parameters[''dataflow_use_public_ips'']}}'
- --kms_key_name
- '{{$.inputs.parameters[''encryption_spec_key_name'']}}'
- --force_runner_mode
- '{{$.inputs.parameters[''force_runner_mode'']}}'
- --gcs_output_path
- '{{$.outputs.artifacts[''feature_attributions''].path}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- /main.py
image: gcr.io/ml-pipeline/model-evaluation:v0.9.2
exec-feature-attribution-2:
container:
args:
- --task
- explanation
- --setup_file
- /setup.py
- --project_id
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --problem_type
- '{{$.inputs.parameters[''problem_type'']}}'
- --root_dir
- '{{$.pipeline_root}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}'
- --batch_prediction_format
- '{{$.inputs.parameters[''predictions_format'']}}'
- '{"IfPresent": {"InputName": "predictions_gcs_source", "Then": ["--batch_prediction_gcs_source",
"{{$.inputs.artifacts[''predictions_gcs_source''].uri}}"]}}'
- '{"IfPresent": {"InputName": "predictions_bigquery_source", "Then": ["--batch_prediction_bigquery_source",
{"Concat": ["bq://", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''projectId'']}}",
".", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''datasetId'']}}",
".", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''tableId'']}}"]}]}}'
- --dataflow_job_prefix
        - evaluation-feature-attribution-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- --dataflow_service_account
- '{{$.inputs.parameters[''dataflow_service_account'']}}'
- --dataflow_disk_size
- '{{$.inputs.parameters[''dataflow_disk_size_gb'']}}'
- --dataflow_machine_type
- '{{$.inputs.parameters[''dataflow_machine_type'']}}'
- --dataflow_workers_num
- '{{$.inputs.parameters[''dataflow_workers_num'']}}'
- --dataflow_max_workers_num
- '{{$.inputs.parameters[''dataflow_max_workers_num'']}}'
- --dataflow_subnetwork
- '{{$.inputs.parameters[''dataflow_subnetwork'']}}'
- --dataflow_use_public_ips
- '{{$.inputs.parameters[''dataflow_use_public_ips'']}}'
- --kms_key_name
- '{{$.inputs.parameters[''encryption_spec_key_name'']}}'
- --force_runner_mode
- '{{$.inputs.parameters[''force_runner_mode'']}}'
- --gcs_output_path
- '{{$.outputs.artifacts[''feature_attributions''].path}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- /main.py
image: gcr.io/ml-pipeline/model-evaluation:v0.9.2
exec-feature-attribution-3:
container:
args:
- --task
- explanation
- --setup_file
- /setup.py
- --project_id
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --problem_type
- '{{$.inputs.parameters[''problem_type'']}}'
- --root_dir
- '{{$.pipeline_root}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}'
- --batch_prediction_format
- '{{$.inputs.parameters[''predictions_format'']}}'
- '{"IfPresent": {"InputName": "predictions_gcs_source", "Then": ["--batch_prediction_gcs_source",
"{{$.inputs.artifacts[''predictions_gcs_source''].uri}}"]}}'
- '{"IfPresent": {"InputName": "predictions_bigquery_source", "Then": ["--batch_prediction_bigquery_source",
{"Concat": ["bq://", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''projectId'']}}",
".", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''datasetId'']}}",
".", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''tableId'']}}"]}]}}'
- --dataflow_job_prefix
        - evaluation-feature-attribution-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- --dataflow_service_account
- '{{$.inputs.parameters[''dataflow_service_account'']}}'
- --dataflow_disk_size
- '{{$.inputs.parameters[''dataflow_disk_size_gb'']}}'
- --dataflow_machine_type
- '{{$.inputs.parameters[''dataflow_machine_type'']}}'
- --dataflow_workers_num
- '{{$.inputs.parameters[''dataflow_workers_num'']}}'
- --dataflow_max_workers_num
- '{{$.inputs.parameters[''dataflow_max_workers_num'']}}'
- --dataflow_subnetwork
- '{{$.inputs.parameters[''dataflow_subnetwork'']}}'
- --dataflow_use_public_ips
- '{{$.inputs.parameters[''dataflow_use_public_ips'']}}'
- --kms_key_name
- '{{$.inputs.parameters[''encryption_spec_key_name'']}}'
- --force_runner_mode
- '{{$.inputs.parameters[''force_runner_mode'']}}'
- --gcs_output_path
- '{{$.outputs.artifacts[''feature_attributions''].path}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- /main.py
image: gcr.io/ml-pipeline/model-evaluation:v0.9.2
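    # Helper executors: get-model-display-name falls back to a generated
    # 'tabular-workflow-model-<uuid>' name when none is provided, the importer brings
    # in a pre-existing artifact from a runtime-supplied URI, and
    # merge-materialized-splits concatenates two materialized split files.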
exec-get-model-display-name:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _get_model_display_name
command:
- sh
- -ec
        - 'program_path=$(mktemp -d)

          printf "%s" "$0" > "$program_path/ephemeral_component.py"

          python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"

          '
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _get_model_display_name(\n model_display_name: str,\n) ->\
\ NamedTuple('Outputs', [('model_display_name', str),]):\n \"\"\"Returns\
\ the model display name.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name\n\
\ import collections\n import uuid\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name\n\
\n if not model_display_name:\n model_display_name = f'tabular-workflow-model-{uuid.uuid4()}'\n\
\n return collections.namedtuple(\n 'Outputs',\n [\n \
\ 'model_display_name',\n ],\n )(\n model_display_name,\n )\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-importer:
importer:
artifactUri:
runtimeParameter: uri
typeSchema:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
exec-merge-materialized-splits:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _merge_materialized_splits
command:
- sh
- -ec
        - 'program_path=$(mktemp -d)

          printf "%s" "$0" > "$program_path/ephemeral_component.py"

          python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"

          '
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _merge_materialized_splits(\n split_0: dsl.InputPath('MaterializedSplit'),\n\
\ split_1: dsl.InputPath('MaterializedSplit'),\n splits: dsl.OutputPath('MaterializedSplit'),\n\
):\n \"\"\"Merge two materialized splits.\n\n Args:\n split_0: The\
\ first materialized split.\n split_1: The second materialized split.\n\
\ splits: The merged materialized split.\n \"\"\"\n with open(split_0,\
\ 'r') as f:\n split_0_content = f.read()\n with open(split_1, 'r')\
\ as f:\n split_1_content = f.read()\n with open(splits, 'w') as f:\n\
\ f.write(','.join([split_0_content, split_1_content]))\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
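    # The exec-model-batch-explanation executors assemble a BatchPredictionJob
    # request with generate_explanation and an explanation_spec from the Concat-style
    # payload and submit it through the launcher in the automl-tables-private image.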
exec-model-batch-explanation:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", " \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"model_parameters\": ", "{{$.inputs.parameters[''model_parameters'']}}",
", \"output_config\": {", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"explanation_metadata_artifact\": \"", "{{$.inputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\"", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- launcher
image: gcr.io/ml-pipeline/automl-tables-private:1.0.18
exec-model-batch-explanation-2:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", " \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"model_parameters\": ", "{{$.inputs.parameters[''model_parameters'']}}",
", \"output_config\": {", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"explanation_metadata_artifact\": \"", "{{$.inputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\"", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- launcher
image: gcr.io/ml-pipeline/automl-tables-private:1.0.18
exec-model-batch-explanation-3:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", " \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"model_parameters\": ", "{{$.inputs.parameters[''model_parameters'']}}",
", \"output_config\": {", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"explanation_metadata_artifact\": \"", "{{$.inputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\"", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- launcher
image: gcr.io/ml-pipeline/automl-tables-private:1.0.18
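    # The exec-model-batch-predict executors submit Vertex AI BatchPredictionJobs via
    # the GCPC batch_prediction_job launcher; the '{"IfPresent": ...}' clauses add the
    # model resource name and the included/excluded field lists to the payload only
    # when those optional inputs are provided.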
exec-model-batch-predict:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", {"IfPresent": {"InputName": "model", "Then": {"Concat": ["\"model\":
\"", "{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}", "\","]}}},
" \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"instance_config\": {", "\"instance_type\": \"", "{{$.inputs.parameters[''instance_type'']}}",
"\"", ", \"key_field\": \"", "{{$.inputs.parameters[''key_field'']}}", "\"
", {"IfPresent": {"InputName": "included_fields", "Then": {"Concat": [",
\"included_fields\": ", "{{$.inputs.parameters[''included_fields'']}}"]}}},
{"IfPresent": {"InputName": "excluded_fields", "Then": {"Concat": [", \"excluded_fields\":
", "{{$.inputs.parameters[''excluded_fields'']}}"]}}}, "}", ", \"model_parameters\":
", "{{$.inputs.parameters[''model_parameters'']}}", ", \"output_config\":
{", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-model-batch-predict-2:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", {"IfPresent": {"InputName": "model", "Then": {"Concat": ["\"model\":
\"", "{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}", "\","]}}},
" \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"instance_config\": {", "\"instance_type\": \"", "{{$.inputs.parameters[''instance_type'']}}",
"\"", ", \"key_field\": \"", "{{$.inputs.parameters[''key_field'']}}", "\"
", {"IfPresent": {"InputName": "included_fields", "Then": {"Concat": [",
\"included_fields\": ", "{{$.inputs.parameters[''included_fields'']}}"]}}},
{"IfPresent": {"InputName": "excluded_fields", "Then": {"Concat": [", \"excluded_fields\":
", "{{$.inputs.parameters[''excluded_fields'']}}"]}}}, "}", ", \"model_parameters\":
", "{{$.inputs.parameters[''model_parameters'']}}", ", \"output_config\":
{", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-model-batch-predict-3:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", {"IfPresent": {"InputName": "model", "Then": {"Concat": ["\"model\":
\"", "{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}", "\","]}}},
" \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"instance_config\": {", "\"instance_type\": \"", "{{$.inputs.parameters[''instance_type'']}}",
"\"", ", \"key_field\": \"", "{{$.inputs.parameters[''key_field'']}}", "\"
", {"IfPresent": {"InputName": "included_fields", "Then": {"Concat": [",
\"included_fields\": ", "{{$.inputs.parameters[''included_fields'']}}"]}}},
{"IfPresent": {"InputName": "excluded_fields", "Then": {"Concat": [", \"excluded_fields\":
", "{{$.inputs.parameters[''excluded_fields'']}}"]}}}, "}", ", \"model_parameters\":
", "{{$.inputs.parameters[''model_parameters'']}}", ", \"output_config\":
{", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-model-batch-predict-4:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", {"IfPresent": {"InputName": "model", "Then": {"Concat": ["\"model\":
\"", "{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}", "\","]}}},
" \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"instance_config\": {", "\"instance_type\": \"", "{{$.inputs.parameters[''instance_type'']}}",
"\"", ", \"key_field\": \"", "{{$.inputs.parameters[''key_field'']}}", "\"
", {"IfPresent": {"InputName": "included_fields", "Then": {"Concat": [",
\"included_fields\": ", "{{$.inputs.parameters[''included_fields'']}}"]}}},
{"IfPresent": {"InputName": "excluded_fields", "Then": {"Concat": [", \"excluded_fields\":
", "{{$.inputs.parameters[''excluded_fields'']}}"]}}}, "}", ", \"model_parameters\":
", "{{$.inputs.parameters[''model_parameters'']}}", ", \"output_config\":
{", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-model-batch-predict-5:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", {"IfPresent": {"InputName": "model", "Then": {"Concat": ["\"model\":
\"", "{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}", "\","]}}},
" \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"instance_config\": {", "\"instance_type\": \"", "{{$.inputs.parameters[''instance_type'']}}",
"\"", ", \"key_field\": \"", "{{$.inputs.parameters[''key_field'']}}", "\"
", {"IfPresent": {"InputName": "included_fields", "Then": {"Concat": [",
\"included_fields\": ", "{{$.inputs.parameters[''included_fields'']}}"]}}},
{"IfPresent": {"InputName": "excluded_fields", "Then": {"Concat": [", \"excluded_fields\":
", "{{$.inputs.parameters[''excluded_fields'']}}"]}}}, "}", ", \"model_parameters\":
", "{{$.inputs.parameters[''model_parameters'']}}", ", \"output_config\":
{", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
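    # The exec-model-evaluation executors compute evaluation metrics from the batch
    # prediction results on Dataflow (model-evaluation image v0.4); classification_type
    # is fixed to 'multiclass' and the ground truth is read from the
    # 'instance.<ground_truth_column>' field of the prediction output.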
exec-model-evaluation:
container:
args:
- --setup_file
- /setup.py
- --json_mode
- 'true'
- --project_id
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --problem_type
- '{{$.inputs.parameters[''problem_type'']}}'
- --batch_prediction_format
- '{{$.inputs.parameters[''predictions_format'']}}'
- --batch_prediction_gcs_source
- '{{$.inputs.artifacts[''batch_prediction_job''].metadata[''gcsOutputDirectory'']}}'
- --ground_truth_format
- '{{$.inputs.parameters[''ground_truth_format'']}}'
- --key_prefix_in_prediction_dataset
- instance
- --root_dir
- '{{$.inputs.parameters[''root_dir'']}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}'
- --classification_type
- multiclass
- --ground_truth_column
- instance.{{$.inputs.parameters['ground_truth_column']}}
- --prediction_score_column
- '{{$.inputs.parameters[''prediction_score_column'']}}'
- --prediction_label_column
- '{{$.inputs.parameters[''prediction_label_column'']}}'
- --prediction_id_column
- ''
- --example_weight_column
- ''
- --generate_feature_attribution
- 'false'
- --dataflow_job_prefix
- evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- --dataflow_service_account
- '{{$.inputs.parameters[''dataflow_service_account'']}}'
- --dataflow_disk_size
- '{{$.inputs.parameters[''dataflow_disk_size'']}}'
- --dataflow_machine_type
- '{{$.inputs.parameters[''dataflow_machine_type'']}}'
- --dataflow_workers_num
- '{{$.inputs.parameters[''dataflow_workers_num'']}}'
- --dataflow_max_workers_num
- '{{$.inputs.parameters[''dataflow_max_workers_num'']}}'
- --dataflow_subnetwork
- '{{$.inputs.parameters[''dataflow_subnetwork'']}}'
- --dataflow_use_public_ips
- '{{$.inputs.parameters[''dataflow_use_public_ips'']}}'
- --kms_key_name
- '{{$.inputs.parameters[''encryption_spec_key_name'']}}'
- --output_metrics_gcs_path
- '{{$.outputs.artifacts[''evaluation_metrics''].uri}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python
- /main.py
image: gcr.io/ml-pipeline/model-evaluation:v0.4
exec-model-evaluation-2:
container:
args:
- --setup_file
- /setup.py
- --json_mode
- 'true'
- --project_id
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --problem_type
- '{{$.inputs.parameters[''problem_type'']}}'
- --batch_prediction_format
- '{{$.inputs.parameters[''predictions_format'']}}'
- --batch_prediction_gcs_source
- '{{$.inputs.artifacts[''batch_prediction_job''].metadata[''gcsOutputDirectory'']}}'
- --ground_truth_format
- '{{$.inputs.parameters[''ground_truth_format'']}}'
- --key_prefix_in_prediction_dataset
- instance
- --root_dir
- '{{$.inputs.parameters[''root_dir'']}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}'
- --classification_type
- multiclass
- --ground_truth_column
- instance.{{$.inputs.parameters['ground_truth_column']}}
- --prediction_score_column
- '{{$.inputs.parameters[''prediction_score_column'']}}'
- --prediction_label_column
- '{{$.inputs.parameters[''prediction_label_column'']}}'
- --prediction_id_column
- ''
- --example_weight_column
- ''
- --generate_feature_attribution
- 'false'
- --dataflow_job_prefix
- evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- --dataflow_service_account
- '{{$.inputs.parameters[''dataflow_service_account'']}}'
- --dataflow_disk_size
- '{{$.inputs.parameters[''dataflow_disk_size'']}}'
- --dataflow_machine_type
- '{{$.inputs.parameters[''dataflow_machine_type'']}}'
- --dataflow_workers_num
- '{{$.inputs.parameters[''dataflow_workers_num'']}}'
- --dataflow_max_workers_num
- '{{$.inputs.parameters[''dataflow_max_workers_num'']}}'
- --dataflow_subnetwork
- '{{$.inputs.parameters[''dataflow_subnetwork'']}}'
- --dataflow_use_public_ips
- '{{$.inputs.parameters[''dataflow_use_public_ips'']}}'
- --kms_key_name
- '{{$.inputs.parameters[''encryption_spec_key_name'']}}'
- --output_metrics_gcs_path
- '{{$.outputs.artifacts[''evaluation_metrics''].uri}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python
- /main.py
image: gcr.io/ml-pipeline/model-evaluation:v0.4
exec-model-evaluation-3:
container:
args:
- --setup_file
- /setup.py
- --json_mode
- 'true'
- --project_id
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --problem_type
- '{{$.inputs.parameters[''problem_type'']}}'
- --batch_prediction_format
- '{{$.inputs.parameters[''predictions_format'']}}'
- --batch_prediction_gcs_source
- '{{$.inputs.artifacts[''batch_prediction_job''].metadata[''gcsOutputDirectory'']}}'
- --ground_truth_format
- '{{$.inputs.parameters[''ground_truth_format'']}}'
- --key_prefix_in_prediction_dataset
- instance
- --root_dir
- '{{$.inputs.parameters[''root_dir'']}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}'
- --classification_type
- multiclass
- --ground_truth_column
- instance.{{$.inputs.parameters['ground_truth_column']}}
- --prediction_score_column
- '{{$.inputs.parameters[''prediction_score_column'']}}'
- --prediction_label_column
- '{{$.inputs.parameters[''prediction_label_column'']}}'
- --prediction_id_column
- ''
- --example_weight_column
- ''
- --generate_feature_attribution
- 'false'
- --dataflow_job_prefix
- evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- --dataflow_service_account
- '{{$.inputs.parameters[''dataflow_service_account'']}}'
- --dataflow_disk_size
- '{{$.inputs.parameters[''dataflow_disk_size'']}}'
- --dataflow_machine_type
- '{{$.inputs.parameters[''dataflow_machine_type'']}}'
- --dataflow_workers_num
- '{{$.inputs.parameters[''dataflow_workers_num'']}}'
- --dataflow_max_workers_num
- '{{$.inputs.parameters[''dataflow_max_workers_num'']}}'
- --dataflow_subnetwork
- '{{$.inputs.parameters[''dataflow_subnetwork'']}}'
- --dataflow_use_public_ips
- '{{$.inputs.parameters[''dataflow_use_public_ips'']}}'
- --kms_key_name
- '{{$.inputs.parameters[''encryption_spec_key_name'']}}'
- --output_metrics_gcs_path
- '{{$.outputs.artifacts[''evaluation_metrics''].uri}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python
- /main.py
image: gcr.io/ml-pipeline/model-evaluation:v0.4
exec-model-evaluation-import:
container:
args:
- '{"IfPresent": {"InputName": "metrics", "Then": ["--metrics", "{{$.inputs.artifacts[''metrics''].uri}}",
"--metrics_explanation", "{{$.inputs.artifacts[''metrics''].metadata[''explanation_gcs_path'']}}"]}}'
- '{"IfPresent": {"InputName": "explanation", "Then": ["--explanation", "{{$.inputs.artifacts[''explanation''].metadata[''explanation_gcs_path'']}}"]}}'
- '{"IfPresent": {"InputName": "classification_metrics", "Then": ["--classification_metrics",
"{{$.inputs.artifacts[''classification_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "forecasting_metrics", "Then": ["--forecasting_metrics",
"{{$.inputs.artifacts[''forecasting_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "regression_metrics", "Then": ["--regression_metrics",
"{{$.inputs.artifacts[''regression_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "text_generation_metrics", "Then": ["--text_generation_metrics",
"{{$.inputs.artifacts[''text_generation_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "question_answering_metrics", "Then": ["--question_answering_metrics",
"{{$.inputs.artifacts[''question_answering_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "summarization_metrics", "Then": ["--summarization_metrics",
"{{$.inputs.artifacts[''summarization_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "feature_attributions", "Then": ["--feature_attributions",
"{{$.inputs.artifacts[''feature_attributions''].uri}}"]}}'
- '{"IfPresent": {"InputName": "embedding_metrics", "Then": ["--embedding_metrics",
"{{$.inputs.artifacts[''embedding_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "problem_type", "Then": ["--problem_type",
"{{$.inputs.parameters[''problem_type'']}}"]}}'
- --display_name
- '{{$.inputs.parameters[''display_name'']}}'
- --dataset_path
- '{{$.inputs.parameters[''dataset_path'']}}'
- --dataset_paths
- '{{$.inputs.parameters[''dataset_paths'']}}'
- --dataset_type
- '{{$.inputs.parameters[''dataset_type'']}}'
- --pipeline_job_id
- '{{$.pipeline_job_uuid}}'
- --pipeline_job_resource_name
- '{{$.pipeline_job_resource_name}}'
- --model_name
- '{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --evaluation_resource_name
- '{{$.outputs.parameters[''evaluation_resource_name''].output_file}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container._implementation.model_evaluation.import_model_evaluation
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-model-evaluation-import-2:
container:
args:
- '{"IfPresent": {"InputName": "metrics", "Then": ["--metrics", "{{$.inputs.artifacts[''metrics''].uri}}",
"--metrics_explanation", "{{$.inputs.artifacts[''metrics''].metadata[''explanation_gcs_path'']}}"]}}'
- '{"IfPresent": {"InputName": "explanation", "Then": ["--explanation", "{{$.inputs.artifacts[''explanation''].metadata[''explanation_gcs_path'']}}"]}}'
- '{"IfPresent": {"InputName": "classification_metrics", "Then": ["--classification_metrics",
"{{$.inputs.artifacts[''classification_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "forecasting_metrics", "Then": ["--forecasting_metrics",
"{{$.inputs.artifacts[''forecasting_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "regression_metrics", "Then": ["--regression_metrics",
"{{$.inputs.artifacts[''regression_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "text_generation_metrics", "Then": ["--text_generation_metrics",
"{{$.inputs.artifacts[''text_generation_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "question_answering_metrics", "Then": ["--question_answering_metrics",
"{{$.inputs.artifacts[''question_answering_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "summarization_metrics", "Then": ["--summarization_metrics",
"{{$.inputs.artifacts[''summarization_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "feature_attributions", "Then": ["--feature_attributions",
"{{$.inputs.artifacts[''feature_attributions''].uri}}"]}}'
- '{"IfPresent": {"InputName": "embedding_metrics", "Then": ["--embedding_metrics",
"{{$.inputs.artifacts[''embedding_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "problem_type", "Then": ["--problem_type",
"{{$.inputs.parameters[''problem_type'']}}"]}}'
- --display_name
- '{{$.inputs.parameters[''display_name'']}}'
- --dataset_path
- '{{$.inputs.parameters[''dataset_path'']}}'
- --dataset_paths
- '{{$.inputs.parameters[''dataset_paths'']}}'
- --dataset_type
- '{{$.inputs.parameters[''dataset_type'']}}'
- --pipeline_job_id
- '{{$.pipeline_job_uuid}}'
- --pipeline_job_resource_name
- '{{$.pipeline_job_resource_name}}'
- --model_name
- '{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --evaluation_resource_name
- '{{$.outputs.parameters[''evaluation_resource_name''].output_file}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container._implementation.model_evaluation.import_model_evaluation
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-model-evaluation-import-3:
container:
args:
- '{"IfPresent": {"InputName": "metrics", "Then": ["--metrics", "{{$.inputs.artifacts[''metrics''].uri}}",
"--metrics_explanation", "{{$.inputs.artifacts[''metrics''].metadata[''explanation_gcs_path'']}}"]}}'
- '{"IfPresent": {"InputName": "explanation", "Then": ["--explanation", "{{$.inputs.artifacts[''explanation''].metadata[''explanation_gcs_path'']}}"]}}'
- '{"IfPresent": {"InputName": "classification_metrics", "Then": ["--classification_metrics",
"{{$.inputs.artifacts[''classification_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "forecasting_metrics", "Then": ["--forecasting_metrics",
"{{$.inputs.artifacts[''forecasting_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "regression_metrics", "Then": ["--regression_metrics",
"{{$.inputs.artifacts[''regression_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "text_generation_metrics", "Then": ["--text_generation_metrics",
"{{$.inputs.artifacts[''text_generation_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "question_answering_metrics", "Then": ["--question_answering_metrics",
"{{$.inputs.artifacts[''question_answering_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "summarization_metrics", "Then": ["--summarization_metrics",
"{{$.inputs.artifacts[''summarization_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "feature_attributions", "Then": ["--feature_attributions",
"{{$.inputs.artifacts[''feature_attributions''].uri}}"]}}'
- '{"IfPresent": {"InputName": "embedding_metrics", "Then": ["--embedding_metrics",
"{{$.inputs.artifacts[''embedding_metrics''].uri}}"]}}'
- '{"IfPresent": {"InputName": "problem_type", "Then": ["--problem_type",
"{{$.inputs.parameters[''problem_type'']}}"]}}'
- --display_name
- '{{$.inputs.parameters[''display_name'']}}'
- --dataset_path
- '{{$.inputs.parameters[''dataset_path'']}}'
- --dataset_paths
- '{{$.inputs.parameters[''dataset_paths'']}}'
- --dataset_type
- '{{$.inputs.parameters[''dataset_type'']}}'
- --pipeline_job_id
- '{{$.pipeline_job_uuid}}'
- --pipeline_job_resource_name
- '{{$.pipeline_job_resource_name}}'
- --model_name
- '{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --evaluation_resource_name
- '{{$.outputs.parameters[''evaluation_resource_name''].output_file}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container._implementation.model_evaluation.import_model_evaluation
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-model-upload:
container:
args:
- --type
- UploadModel
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''display_name'']}}",
"\"", ", \"description\": \"", "{{$.inputs.parameters[''description'']}}",
"\"", ", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"explanation_metadata_artifact\": \"", "{{$.inputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\"", ", \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
- '{"IfPresent": {"InputName": "parent_model", "Then": ["--parent_model_name",
"{{$.inputs.artifacts[''parent_model''].metadata[''resourceName'']}}"]}}'
command:
- python3
- -u
- -m
- launcher
image: gcr.io/ml-pipeline/automl-tables-private:1.0.18
exec-model-upload-2:
container:
args:
- --type
- UploadModel
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''display_name'']}}",
"\"", ", \"description\": \"", "{{$.inputs.parameters[''description'']}}",
"\"", ", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"explanation_metadata_artifact\": \"", "{{$.inputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\"", ", \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
- '{"IfPresent": {"InputName": "parent_model", "Then": ["--parent_model_name",
"{{$.inputs.artifacts[''parent_model''].metadata[''resourceName'']}}"]}}'
command:
- python3
- -u
- -m
- launcher
image: gcr.io/ml-pipeline/automl-tables-private:1.0.18
exec-model-upload-3:
container:
args:
- --type
- UploadModel
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''display_name'']}}",
"\"", ", \"description\": \"", "{{$.inputs.parameters[''description'']}}",
"\"", ", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"explanation_metadata_artifact\": \"", "{{$.inputs.artifacts[''explanation_metadata_artifact''].uri}}",
"\"", ", \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
- '{"IfPresent": {"InputName": "parent_model", "Then": ["--parent_model_name",
"{{$.inputs.artifacts[''parent_model''].metadata[''resourceName'']}}"]}}'
command:
- python3
- -u
- -m
- launcher
image: gcr.io/ml-pipeline/automl-tables-private:1.0.18
exec-read-input-uri:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _read_input_uri
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _read_input_uri(\n split_uri: dsl.InputPath('Dataset'),\n\
) -> list: # Required by KFP; pylint:disable=g-bare-generic\n \"\"\"Construct\
\ Dataset based on the batch prediction job.\n\n Args:\n split_uri:\
          \ The path to the file that contains Dataset data.\n\n  Returns:\n    The\
\ list of string that represents the batch prediction input files.\n \"\
\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ with open(split_uri, 'r') as f:\n data_source = json.loads(f.read())\n\
\ return data_source['tf_record_data_source']['file_patterns']\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-read-input-uri-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _read_input_uri
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _read_input_uri(\n split_uri: dsl.InputPath('Dataset'),\n\
) -> list: # Required by KFP; pylint:disable=g-bare-generic\n \"\"\"Construct\
\ Dataset based on the batch prediction job.\n\n Args:\n split_uri:\
          \ The path to the file that contains Dataset data.\n\n  Returns:\n    The\
\ list of string that represents the batch prediction input files.\n \"\
\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ with open(split_uri, 'r') as f:\n data_source = json.loads(f.read())\n\
\ return data_source['tf_record_data_source']['file_patterns']\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-set-optional-inputs:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _set_optional_inputs
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _set_optional_inputs(\n project: str,\n location: str,\n\
\ data_source_csv_filenames: str,\n data_source_bigquery_table_path:\
\ str,\n vertex_dataset: dsl.Input[dsl.Artifact],\n) -> NamedTuple(\n\
\ 'Outputs',\n [\n ('data_source_csv_filenames', str),\n \
\ ('data_source_bigquery_table_path', str),\n ],\n):\n \"\"\"Get\
\ the data source URI.\n\n Args:\n project: The GCP project that runs\
\ the pipeline components.\n location: The GCP region that runs the pipeline\
\ components.\n data_source_csv_filenames: The CSV GCS path when data\
\ source is CSV.\n data_source_bigquery_table_path: The BigQuery table\
\ when data source is BQ.\n vertex_dataset: The Vertex dataset when data\
\ source is Vertex dataset.\n\n Returns:\n A named tuple of CSV or BQ\
\ URI.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name\n\
\ import collections\n from google.cloud import aiplatform\n from google.cloud\
\ import aiplatform_v1beta1 as aip\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name\n\
\n if vertex_dataset is not None:\n # of format\n # projects/294348452381/locations/us-central1/datasets/7104764862735056896\n\
\ dataset_name = vertex_dataset.metadata['resourceName']\n\n aiplatform.init(project=project,\
\ location=location)\n client = aip.DatasetServiceClient(\n client_options={'api_endpoint':\
\ f'{location}-aiplatform.googleapis.com'}\n )\n dataset = client.get_dataset(name=dataset_name)\n\
\ input_config = dataset.metadata['inputConfig']\n if 'gcsSource'\
\ in input_config:\n data_source_csv_filenames = ','.join(input_config['gcsSource']['uri'])\n\
\ elif 'bigquerySource' in input_config:\n data_source_bigquery_table_path\
\ = input_config['bigquerySource']['uri']\n elif data_source_csv_filenames:\n\
\ pass\n elif data_source_bigquery_table_path:\n pass\n else:\n\
\ raise ValueError(\n 'One of vertex_dataset, data_source_csv_filenames,'\n\
\ ' data_source_bigquery_table_path must be specified'\n )\n\n\
\ return collections.namedtuple(\n 'Outputs',\n [\n \
\ 'data_source_csv_filenames',\n 'data_source_bigquery_table_path',\n\
\ ],\n )(\n data_source_csv_filenames,\n data_source_bigquery_table_path,\n\
\ )\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-string-not-empty:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _string_not_empty
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _string_not_empty(value: str) -> str:\n \"\"\"Check if the input\
\ string value is not empty.\n\n Args:\n value: String value to be checked.\n\
          \n  Returns:\n    Boolean value. -> 'true' if not empty, 'false' if empty.\
\ We need to use str\n instead of bool due to a limitation in KFP compiler.\n\
\ \"\"\"\n return 'true' if value else 'false'\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
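    # Note: downstream condition triggers typically compare this component's string
    # output against 'true'; the embedded docstring above explains why a str is
    # returned instead of a bool.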
exec-tabular-stats-and-example-gen:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"tabular-stats-and-example-gen-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\":
{\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"",
"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", "\",
\"args\": [\"stats_generator\",", "\"--train_spec={\\\"prediction_type\\\":
\\\"", "{{$.inputs.parameters[''prediction_type'']}}", "\\\", \\\"target_column\\\":
\\\"", "{{$.inputs.parameters[''target_column_name'']}}", "\\\", \\\"optimization_objective\\\":
\\\"", "{{$.inputs.parameters[''optimization_objective'']}}", "\\\", \\\"weight_column_name\\\":
\\\"", "{{$.inputs.parameters[''weight_column_name'']}}", "\\\", \\\"transformations\\\":
", "{{$.inputs.parameters[''transformations'']}}", ", \\\"quantiles\\\":
", "{{$.inputs.parameters[''quantiles'']}}", ", \\\"enable_probabilistic_inference\\\":
", "{{$.inputs.parameters[''enable_probabilistic_inference'']}}", "}\",
\"--transformations_override_path=", "{{$.inputs.parameters[''transformations_path'']}}",
"\", \"--data_source_csv_filenames=", "{{$.inputs.parameters[''data_source_csv_filenames'']}}",
"\", \"--data_source_bigquery_table_path=", "{{$.inputs.parameters[''data_source_bigquery_table_path'']}}",
"\", \"--predefined_split_key=", "{{$.inputs.parameters[''predefined_split_key'']}}",
"\", \"--timestamp_split_key=", "{{$.inputs.parameters[''timestamp_split_key'']}}",
"\", \"--stratified_split_key=", "{{$.inputs.parameters[''stratified_split_key'']}}",
"\", \"--training_fraction=", "{{$.inputs.parameters[''training_fraction'']}}",
"\", \"--validation_fraction=", "{{$.inputs.parameters[''validation_fraction'']}}",
"\", \"--test_fraction=", "{{$.inputs.parameters[''test_fraction'']}}",
"\", \"--target_column=", "{{$.inputs.parameters[''target_column_name'']}}",
"\", \"--request_type=", "{{$.inputs.parameters[''request_type'']}}", "\",
\"--optimization_objective_recall_value=", "{{$.inputs.parameters[''optimization_objective_recall_value'']}}",
"\", \"--optimization_objective_precision_value=", "{{$.inputs.parameters[''optimization_objective_precision_value'']}}",
"\", \"--example_gen_gcs_output_prefix=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/example_gen_output\",
\"--dataset_stats_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/stats/\",
\"--stats_result_path=", "{{$.outputs.artifacts[''dataset_stats''].uri}}",
"\", \"--dataset_schema_path=", "{{$.outputs.artifacts[''dataset_schema''].uri}}",
"\", \"--job_name=tabular-stats-and-example-gen-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"\", \"--dataflow_project=", "{{$.inputs.parameters[''project'']}}", "\",
\"--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\",
\"--dataflow_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging\",
\"--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\",
\"--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}",
"\", \"--dataflow_worker_container_image=", "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625",
"\", \"--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}",
"\", \"--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}",
"\", \"--dataflow_kms_key=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\", \"--dataflow_subnetwork_fully_qualified=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}",
"\", \"--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}",
"\", \"--dataflow_service_account=", "{{$.inputs.parameters[''dataflow_service_account'']}}",
"\", \"--is_distill=", "{{$.inputs.parameters[''run_distillation'']}}",
"\", \"--additional_experiments=", "{{$.inputs.parameters[''additional_experiments'']}}",
"\", \"--metadata_path=", "{{$.outputs.artifacts[''metadata''].uri}}", "\",
\"--train_split=", "{{$.outputs.artifacts[''train_split''].uri}}", "\",
\"--eval_split=", "{{$.outputs.artifacts[''eval_split''].uri}}", "\", \"--test_split=",
"{{$.outputs.artifacts[''test_split''].uri}}", "\", \"--test_split_for_batch_prediction_component=",
"{{$.outputs.parameters[''test_split_json''].output_file}}", "\", \"--downsampled_test_split_for_batch_prediction_component=",
"{{$.outputs.parameters[''downsampled_test_split_json''].output_file}}",
"\", \"--instance_baseline_path=", "{{$.outputs.artifacts[''instance_baseline''].uri}}",
"\", \"--lro_job_info=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/lro\",
\"--gcp_resources_path=", "{{$.outputs.parameters[''gcp_resources''].output_file}}",
"\", \"--parse_json=true\", \"--generate_additional_downsample_test_split=true\",
\"--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-write-bp-result-path:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _write_bp_result_path
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _write_bp_result_path(\n bp_job: dsl.Input[dsl.Artifact],\n\
\ result: dsl.OutputPath('Dataset'),\n):\n \"\"\"Construct Dataset based\
\ on the batch prediction job.\n\n Args:\n bp_job: The batch prediction\
          \ job artifact.\n    result: The path to the file that contains Dataset\
\ data.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ directory = bp_job.metadata['gcsOutputDirectory']\n data_source = {\n\
\ 'tf_record_data_source': {\n 'file_patterns': [\n \
\ f'{directory}/prediction.results-*',\n ],\n 'coder':\
\ 'PROTO_VALUE',\n },\n }\n with open(result, 'w') as f:\n f.write(json.dumps(data_source))\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-write-bp-result-path-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- _write_bp_result_path
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef _write_bp_result_path(\n bp_job: dsl.Input[dsl.Artifact],\n\
\ result: dsl.OutputPath('Dataset'),\n):\n \"\"\"Construct Dataset based\
\ on the batch prediction job.\n\n Args:\n bp_job: The batch prediction\
          \ job artifact.\n    result: The path to the file that contains Dataset\
\ data.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ directory = bp_job.metadata['gcsOutputDirectory']\n data_source = {\n\
\ 'tf_record_data_source': {\n 'file_patterns': [\n \
\ f'{directory}/prediction.results-*',\n ],\n 'coder':\
\ 'PROTO_VALUE',\n },\n }\n with open(result, 'w') as f:\n f.write(json.dumps(data_source))\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
pipelineInfo:
description: 'Complete AutoML Tables pipeline.
Includes feature engineering, architecture search, and hyper-parameter tuning.'
name: automl-tabular
root:
dag:
outputs:
artifacts:
feature-attribution-2-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-2-feature_attributions
producerSubtask: exit-handler-1
feature-attribution-3-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-3-feature_attributions
producerSubtask: exit-handler-1
feature-attribution-feature_attributions:
artifactSelectors:
- outputArtifactKey: feature-attribution-feature_attributions
producerSubtask: exit-handler-1
model-evaluation-2-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-2-evaluation_metrics
producerSubtask: exit-handler-1
model-evaluation-3-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-3-evaluation_metrics
producerSubtask: exit-handler-1
model-evaluation-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: model-evaluation-evaluation_metrics
producerSubtask: exit-handler-1
tasks:
automl-tabular-finalizer:
cachingOptions:
enableCache: true
componentRef:
name: comp-automl-tabular-finalizer
dependentTasks:
- exit-handler-1
inputs:
parameters:
location:
componentInputParameter: location
project:
componentInputParameter: project
root_dir:
componentInputParameter: root_dir
taskInfo:
name: automl-tabular-finalizer
triggerPolicy:
strategy: ALL_UPSTREAM_TASKS_COMPLETED
exit-handler-1:
componentRef:
name: comp-exit-handler-1
dependentTasks:
- get-model-display-name
- set-optional-inputs
inputs:
artifacts:
pipelinechannel--parent_model:
componentInputArtifact: parent_model
parameters:
pipelinechannel--additional_experiments:
componentInputParameter: additional_experiments
pipelinechannel--cv_trainer_worker_pool_specs_override:
componentInputParameter: cv_trainer_worker_pool_specs_override
pipelinechannel--dataflow_service_account:
componentInputParameter: dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: dataflow_use_public_ips
pipelinechannel--disable_early_stopping:
componentInputParameter: disable_early_stopping
pipelinechannel--distill_batch_predict_machine_type:
componentInputParameter: distill_batch_predict_machine_type
pipelinechannel--distill_batch_predict_max_replica_count:
componentInputParameter: distill_batch_predict_max_replica_count
pipelinechannel--distill_batch_predict_starting_replica_count:
componentInputParameter: distill_batch_predict_starting_replica_count
pipelinechannel--enable_probabilistic_inference:
componentInputParameter: enable_probabilistic_inference
pipelinechannel--encryption_spec_key_name:
componentInputParameter: encryption_spec_key_name
pipelinechannel--evaluation_batch_explain_machine_type:
componentInputParameter: evaluation_batch_explain_machine_type
pipelinechannel--evaluation_batch_explain_max_replica_count:
componentInputParameter: evaluation_batch_explain_max_replica_count
pipelinechannel--evaluation_batch_explain_starting_replica_count:
componentInputParameter: evaluation_batch_explain_starting_replica_count
pipelinechannel--evaluation_batch_predict_machine_type:
componentInputParameter: evaluation_batch_predict_machine_type
pipelinechannel--evaluation_batch_predict_max_replica_count:
componentInputParameter: evaluation_batch_predict_max_replica_count
pipelinechannel--evaluation_batch_predict_starting_replica_count:
componentInputParameter: evaluation_batch_predict_starting_replica_count
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: evaluation_dataflow_max_num_workers
pipelinechannel--evaluation_dataflow_starting_num_workers:
componentInputParameter: evaluation_dataflow_starting_num_workers
pipelinechannel--export_additional_model_without_custom_ops:
componentInputParameter: export_additional_model_without_custom_ops
pipelinechannel--fast_testing:
componentInputParameter: fast_testing
pipelinechannel--get-model-display-name-model_display_name:
taskOutputParameter:
outputParameterKey: model_display_name
producerTask: get-model-display-name
pipelinechannel--location:
componentInputParameter: location
pipelinechannel--model_description:
componentInputParameter: model_description
pipelinechannel--optimization_objective:
componentInputParameter: optimization_objective
pipelinechannel--optimization_objective_precision_value:
componentInputParameter: optimization_objective_precision_value
pipelinechannel--optimization_objective_recall_value:
componentInputParameter: optimization_objective_recall_value
pipelinechannel--predefined_split_key:
componentInputParameter: predefined_split_key
pipelinechannel--prediction_type:
componentInputParameter: prediction_type
pipelinechannel--project:
componentInputParameter: project
pipelinechannel--quantiles:
componentInputParameter: quantiles
pipelinechannel--root_dir:
componentInputParameter: root_dir
pipelinechannel--run_distillation:
componentInputParameter: run_distillation
pipelinechannel--run_evaluation:
componentInputParameter: run_evaluation
pipelinechannel--set-optional-inputs-data_source_bigquery_table_path:
taskOutputParameter:
outputParameterKey: data_source_bigquery_table_path
producerTask: set-optional-inputs
pipelinechannel--set-optional-inputs-data_source_csv_filenames:
taskOutputParameter:
outputParameterKey: data_source_csv_filenames
producerTask: set-optional-inputs
pipelinechannel--stage_1_num_parallel_trials:
componentInputParameter: stage_1_num_parallel_trials
pipelinechannel--stage_1_tuner_worker_pool_specs_override:
componentInputParameter: stage_1_tuner_worker_pool_specs_override
pipelinechannel--stage_1_tuning_result_artifact_uri:
componentInputParameter: stage_1_tuning_result_artifact_uri
pipelinechannel--stage_2_num_parallel_trials:
componentInputParameter: stage_2_num_parallel_trials
pipelinechannel--stage_2_num_selected_trials:
componentInputParameter: stage_2_num_selected_trials
pipelinechannel--stats_and_example_gen_dataflow_disk_size_gb:
componentInputParameter: stats_and_example_gen_dataflow_disk_size_gb
pipelinechannel--stats_and_example_gen_dataflow_machine_type:
componentInputParameter: stats_and_example_gen_dataflow_machine_type
pipelinechannel--stats_and_example_gen_dataflow_max_num_workers:
componentInputParameter: stats_and_example_gen_dataflow_max_num_workers
pipelinechannel--stratified_split_key:
componentInputParameter: stratified_split_key
pipelinechannel--study_spec_parameters_override:
componentInputParameter: study_spec_parameters_override
pipelinechannel--target_column:
componentInputParameter: target_column
pipelinechannel--test_fraction:
componentInputParameter: test_fraction
pipelinechannel--timestamp_split_key:
componentInputParameter: timestamp_split_key
pipelinechannel--train_budget_milli_node_hours:
componentInputParameter: train_budget_milli_node_hours
pipelinechannel--training_fraction:
componentInputParameter: training_fraction
pipelinechannel--transform_dataflow_disk_size_gb:
componentInputParameter: transform_dataflow_disk_size_gb
pipelinechannel--transform_dataflow_machine_type:
componentInputParameter: transform_dataflow_machine_type
pipelinechannel--transform_dataflow_max_num_workers:
componentInputParameter: transform_dataflow_max_num_workers
pipelinechannel--transformations:
componentInputParameter: transformations
pipelinechannel--validation_fraction:
componentInputParameter: validation_fraction
pipelinechannel--weight_column:
componentInputParameter: weight_column
taskInfo:
name: exit-handler-1
get-model-display-name:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-model-display-name
inputs:
parameters:
model_display_name:
componentInputParameter: model_display_name
taskInfo:
name: get-model-display-name
set-optional-inputs:
cachingOptions:
enableCache: true
componentRef:
name: comp-set-optional-inputs
inputs:
artifacts:
vertex_dataset:
componentInputArtifact: vertex_dataset
parameters:
data_source_bigquery_table_path:
componentInputParameter: data_source_bigquery_table_path
data_source_csv_filenames:
componentInputParameter: data_source_csv_filenames
location:
componentInputParameter: location
project:
componentInputParameter: project
taskInfo:
name: set-optional-inputs
inputDefinitions:
artifacts:
parent_model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: Vertex Model to upload this model as a version of.
isOptional: true
vertex_dataset:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The Vertex dataset artifact.
parameters:
additional_experiments:
        description: Use this field to configure private preview features.
isOptional: true
parameterType: STRUCT
cv_trainer_worker_pool_specs_override:
description: 'The dictionary for overriding stage
cv trainer worker pool spec.'
isOptional: true
parameterType: LIST
data_source_bigquery_table_path:
defaultValue: ''
description: 'The BigQuery table path of format
bq://bq_project.bq_dataset.bq_table'
isOptional: true
parameterType: STRING
data_source_csv_filenames:
defaultValue: ''
description: 'A string that represents a list of comma
separated CSV filenames.'
isOptional: true
parameterType: STRING
dataflow_service_account:
defaultValue: ''
description: Custom service account to run dataflow jobs.
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
description: 'Dataflow''s fully qualified subnetwork name, when empty
the default subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications'
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: 'Specifies whether Dataflow workers use public IP
addresses.'
isOptional: true
parameterType: BOOLEAN
disable_early_stopping:
defaultValue: false
        description: Whether to disable early stopping.
isOptional: true
parameterType: BOOLEAN
distill_batch_predict_machine_type:
defaultValue: n1-standard-16
description: 'The prediction server machine type for
batch predict component in the model distillation.'
isOptional: true
parameterType: STRING
distill_batch_predict_max_replica_count:
defaultValue: 25.0
description: 'The max number of prediction server
          servers for the batch predict component in the model distillation.'
isOptional: true
parameterType: NUMBER_INTEGER
distill_batch_predict_starting_replica_count:
defaultValue: 25.0
description: 'The initial number of
          prediction servers for the batch predict component in the model distillation.'
isOptional: true
parameterType: NUMBER_INTEGER
enable_probabilistic_inference:
defaultValue: false
description: 'If probabilistic inference is enabled, the
model will fit a distribution that captures the uncertainty of a
prediction. At inference time, the predictive distribution is used to make
a point prediction that minimizes the optimization objective. For example,
the mean of a predictive distribution is the point prediction that
minimizes RMSE loss. If quantiles are specified, then the quantiles of the
distribution are also returned.'
isOptional: true
parameterType: BOOLEAN
encryption_spec_key_name:
defaultValue: ''
description: The KMS key name.
isOptional: true
parameterType: STRING
evaluation_batch_explain_machine_type:
defaultValue: n1-highmem-8
description: 'The prediction server machine type
for batch explain components during evaluation.'
isOptional: true
parameterType: STRING
evaluation_batch_explain_max_replica_count:
defaultValue: 10.0
description: 'The max number of prediction
          servers for batch explain components during evaluation.'
isOptional: true
parameterType: NUMBER_INTEGER
evaluation_batch_explain_starting_replica_count:
defaultValue: 10.0
description: 'The initial number of
          prediction servers for batch explain components during evaluation.'
isOptional: true
parameterType: NUMBER_INTEGER
evaluation_batch_predict_machine_type:
defaultValue: n1-highmem-8
description: 'The prediction server machine type
for batch predict components during evaluation.'
isOptional: true
parameterType: STRING
evaluation_batch_predict_max_replica_count:
defaultValue: 20.0
description: 'The max number of prediction
          servers for batch predict components during evaluation.'
isOptional: true
parameterType: NUMBER_INTEGER
evaluation_batch_predict_starting_replica_count:
defaultValue: 20.0
description: 'The initial number of
          prediction servers for batch predict components during evaluation.'
isOptional: true
parameterType: NUMBER_INTEGER
evaluation_dataflow_disk_size_gb:
defaultValue: 50.0
description: 'Dataflow worker''s disk size in GB for
evaluation components.'
isOptional: true
parameterType: NUMBER_INTEGER
evaluation_dataflow_machine_type:
defaultValue: n1-standard-4
description: 'The dataflow machine type for evaluation
components.'
isOptional: true
parameterType: STRING
evaluation_dataflow_max_num_workers:
defaultValue: 100.0
description: 'The max number of Dataflow workers for
evaluation components.'
isOptional: true
parameterType: NUMBER_INTEGER
evaluation_dataflow_starting_num_workers:
defaultValue: 10.0
description: 'The initial number of Dataflow
workers for evaluation components.'
isOptional: true
parameterType: NUMBER_INTEGER
export_additional_model_without_custom_ops:
defaultValue: false
description: 'Whether to export additional
model without custom TensorFlow operators.'
isOptional: true
parameterType: BOOLEAN
fast_testing:
defaultValue: false
description: Internal flag used for presubmit tests.
isOptional: true
parameterType: BOOLEAN
location:
description: The GCP region that runs the pipeline components.
parameterType: STRING
model_description:
defaultValue: ''
        description: The description of the uploaded Vertex model.
isOptional: true
parameterType: STRING
model_display_name:
defaultValue: ''
        description: The display name of the uploaded Vertex model.
isOptional: true
parameterType: STRING
optimization_objective:
description: 'For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall",
or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".'
parameterType: STRING
optimization_objective_precision_value:
defaultValue: -1.0
description: 'Required when optimization_objective
is ''maximize-recall-at-precision''. Must be between 0 and 1, inclusive.'
isOptional: true
parameterType: NUMBER_DOUBLE
optimization_objective_recall_value:
defaultValue: -1.0
description: 'Required when optimization_objective is
''maximize-precision-at-recall''. Must be between 0 and 1, inclusive.'
isOptional: true
parameterType: NUMBER_DOUBLE
predefined_split_key:
defaultValue: ''
description: The predefined_split column name.
isOptional: true
parameterType: STRING
prediction_type:
description: 'The type of prediction the model is to produce.
"classification" or "regression".'
parameterType: STRING
project:
description: The GCP project that runs the pipeline components.
parameterType: STRING
quantiles:
description: 'Quantiles to use for probabilistic inference. Up to 5 quantiles
are allowed of values between 0 and 1, exclusive. Represents the quantiles
to use for that objective. Quantiles must be unique.'
isOptional: true
parameterType: LIST
root_dir:
description: The root GCS directory for the pipeline components.
parameterType: STRING
run_distillation:
defaultValue: false
description: 'Whether the distillation should be applied to the
training.'
isOptional: true
parameterType: BOOLEAN
run_evaluation:
defaultValue: false
description: Whether to run evaluation steps during training.
isOptional: true
parameterType: BOOLEAN
stage_1_num_parallel_trials:
defaultValue: 35.0
        description: Number of parallel trials for stage 1.
isOptional: true
parameterType: NUMBER_INTEGER
stage_1_tuner_worker_pool_specs_override:
description: 'The dictionary for overriding
stage 1 tuner worker pool spec.'
isOptional: true
parameterType: LIST
stage_1_tuning_result_artifact_uri:
defaultValue: ''
description: 'The stage 1 tuning result artifact GCS
URI.'
isOptional: true
parameterType: STRING
stage_2_num_parallel_trials:
defaultValue: 35.0
        description: Number of parallel trials for stage 2.
isOptional: true
parameterType: NUMBER_INTEGER
stage_2_num_selected_trials:
defaultValue: 5.0
        description: Number of selected trials for stage 2.
isOptional: true
parameterType: NUMBER_INTEGER
stats_and_example_gen_dataflow_disk_size_gb:
defaultValue: 40.0
description: 'Dataflow worker''s disk size in
GB for stats_and_example_gen component.'
isOptional: true
parameterType: NUMBER_INTEGER
stats_and_example_gen_dataflow_machine_type:
defaultValue: n1-standard-16
description: 'The dataflow machine type for
stats_and_example_gen component.'
isOptional: true
parameterType: STRING
stats_and_example_gen_dataflow_max_num_workers:
defaultValue: 25.0
description: 'The max number of Dataflow
workers for stats_and_example_gen component.'
isOptional: true
parameterType: NUMBER_INTEGER
stratified_split_key:
defaultValue: ''
description: The stratified_split column name.
isOptional: true
parameterType: STRING
study_spec_parameters_override:
description: The list for overriding study spec.
isOptional: true
parameterType: LIST
target_column:
description: The target column name.
parameterType: STRING
test_fraction:
defaultValue: -1.0
        description: The test fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
timestamp_split_key:
defaultValue: ''
description: The timestamp_split column name.
isOptional: true
parameterType: STRING
train_budget_milli_node_hours:
description: 'The train budget of creating this model,
expressed in milli node hours i.e. 1,000 value in this field means 1 node
hour.'
parameterType: NUMBER_DOUBLE
training_fraction:
defaultValue: -1.0
description: The training fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
transform_dataflow_disk_size_gb:
defaultValue: 40.0
description: 'Dataflow worker''s disk size in GB for
transform component.'
isOptional: true
parameterType: NUMBER_INTEGER
transform_dataflow_machine_type:
defaultValue: n1-standard-16
description: 'The dataflow machine type for transform
component.'
isOptional: true
parameterType: STRING
transform_dataflow_max_num_workers:
defaultValue: 25.0
description: 'The max number of Dataflow workers for
transform component.'
isOptional: true
parameterType: NUMBER_INTEGER
transformations:
description: 'The path to a GCS file containing the transformations to
apply.'
parameterType: STRING
validation_fraction:
defaultValue: -1.0
description: The validation fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
weight_column:
defaultValue: ''
description: The weight column name.
isOptional: true
parameterType: STRING
outputDefinitions:
artifacts:
feature-attribution-2-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
feature-attribution-3-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
feature-attribution-feature_attributions:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-2-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-3-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
model-evaluation-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
schemaVersion: 2.1.0
sdkVersion: kfp-2.0.0-rc.2
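For reference, a compiled spec like the one above is normally submitted to Vertex AI Pipelines rather than edited by hand. The sketch below shows one way to launch it with the google.cloud.aiplatform SDK; the project, region, bucket, and file names are placeholders (not values from this repository), and the parameter values simply cover the required inputDefinitions listed above.

from google.cloud import aiplatform

# Hypothetical project and bucket names; replace with real ones.
aiplatform.init(project='example-project', location='us-central1')

job = aiplatform.PipelineJob(
    display_name='automl-tabular',
    template_path='automl_tabular_pipeline.yaml',  # the compiled spec above, saved to disk
    pipeline_root='gs://example-bucket/pipeline_root',
    parameter_values={
        'project': 'example-project',
        'location': 'us-central1',
        'root_dir': 'gs://example-bucket/pipeline_root',
        'target_column': 'label',
        'prediction_type': 'classification',
        'optimization_objective': 'minimize-log-loss',
        'transformations': 'gs://example-bucket/transformations.json',
        'train_budget_milli_node_hours': 1000,
        'data_source_csv_filenames': 'gs://example-bucket/train.csv',
        'training_fraction': 0.8,
        'validation_fraction': 0.1,
        'test_fraction': 0.1,
    },
)
job.submit()  # non-blocking; use job.run() to block until completion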
# File: kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/utils.py
"""Util functions for AutoML Tabular pipeline."""
import json
import math
import os
import pathlib
from typing import Any, Dict, List, Optional, Tuple
import warnings
_DEFAULT_NUM_PARALLEL_TRAILS = 35
_DEFAULT_STAGE_2_NUM_SELECTED_TRAILS = 5
_NUM_FOLDS = 5
_DISTILL_TOTAL_TRIALS = 100
_EVALUATION_BATCH_PREDICT_MACHINE_TYPE = 'n1-highmem-8'
_EVALUATION_BATCH_PREDICT_STARTING_REPLICA_COUNT = 20
_EVALUATION_BATCH_PREDICT_MAX_REPLICA_COUNT = 20
_EVALUATION_BATCH_EXPLAIN_MACHINE_TYPE = 'n1-highmem-8'
_EVALUATION_BATCH_EXPLAIN_STARTING_REPLICA_COUNT = 10
_EVALUATION_BATCH_EXPLAIN_MAX_REPLICA_COUNT = 10
_EVALUATION_DATAFLOW_MACHINE_TYPE = 'n1-standard-4'
_EVALUATION_DATAFLOW_STARTING_NUM_WORKERS = 10
_EVALUATION_DATAFLOW_MAX_NUM_WORKERS = 100
_EVALUATION_DATAFLOW_DISK_SIZE_GB = 50
# Needed because we reference the AutoML Tabular V2 pipeline.
_GCPC_STAGING_PATH = pathlib.Path(
__file__
).parent.parent.parent.parent.resolve()
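# _GCPC_STAGING_PATH resolves to the google_cloud_pipeline_components package root
# (four parents up from v1/automl/tabular/utils.py); the preview tabular path below
# is built relative to it.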
_GCPC_PREVIEW_TABULAR_PATH = (
_GCPC_STAGING_PATH / 'preview' / 'automl' / 'tabular'
)
# TODO(b/277393122): Once we finish L2L+FTE integration, add use_fte flag
# to signify FTE usage instead of the presence of num_selected_features.
def _get_default_pipeline_params(
project: str,
location: str,
root_dir: str,
target_column: str,
prediction_type: str,
optimization_objective: str,
transformations: str,
train_budget_milli_node_hours: float,
stage_1_num_parallel_trials: Optional[int] = None,
stage_2_num_parallel_trials: Optional[int] = None,
stage_2_num_selected_trials: Optional[int] = None,
data_source_csv_filenames: Optional[str] = None,
data_source_bigquery_table_path: Optional[str] = None,
predefined_split_key: Optional[str] = None,
timestamp_split_key: Optional[str] = None,
stratified_split_key: Optional[str] = None,
training_fraction: Optional[float] = None,
validation_fraction: Optional[float] = None,
test_fraction: Optional[float] = None,
    weight_column: Optional[str] = None,
study_spec_parameters_override: Optional[List[Dict[str, Any]]] = None,
optimization_objective_recall_value: Optional[float] = None,
optimization_objective_precision_value: Optional[float] = None,
stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: Optional[str] = None,
stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,
stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,
transform_dataflow_machine_type: Optional[str] = None,
transform_dataflow_max_num_workers: Optional[int] = None,
transform_dataflow_disk_size_gb: Optional[int] = None,
dataflow_subnetwork: Optional[str] = None,
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: Optional[str] = None,
additional_experiments: Optional[Dict[str, Any]] = None,
dataflow_service_account: Optional[str] = None,
max_selected_features: Optional[int] = None,
apply_feature_selection_tuning: bool = False,
run_evaluation: bool = True,
evaluation_batch_predict_machine_type: Optional[str] = None,
evaluation_batch_predict_starting_replica_count: Optional[int] = None,
evaluation_batch_predict_max_replica_count: Optional[int] = None,
evaluation_batch_explain_machine_type: Optional[str] = None,
evaluation_batch_explain_starting_replica_count: Optional[int] = None,
evaluation_batch_explain_max_replica_count: Optional[int] = None,
evaluation_dataflow_machine_type: Optional[str] = None,
evaluation_dataflow_starting_num_workers: Optional[int] = None,
evaluation_dataflow_max_num_workers: Optional[int] = None,
evaluation_dataflow_disk_size_gb: Optional[int] = None,
run_distillation: bool = False,
distill_batch_predict_machine_type: Optional[str] = None,
distill_batch_predict_starting_replica_count: Optional[int] = None,
distill_batch_predict_max_replica_count: Optional[int] = None,
stage_1_tuning_result_artifact_uri: Optional[str] = None,
quantiles: Optional[List[float]] = None,
enable_probabilistic_inference: bool = False,
num_selected_features: Optional[int] = None,
model_display_name: str = '',
model_description: str = '',
) -> Dict[str, Any]:
"""Get the AutoML Tabular v1 default training pipeline.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".
transformations: The path to a GCS file containing the transformations to
apply.
train_budget_milli_node_hours: The train budget of creating this model,
expressed in milli node hours i.e. 1,000 value in this field means 1 node
hour.
    stage_1_num_parallel_trials: Number of parallel trials for stage 1.
    stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
data_source_csv_filenames: The CSV data source.
data_source_bigquery_table_path: The BigQuery data source.
predefined_split_key: The predefined_split column name.
timestamp_split_key: The timestamp_split column name.
stratified_split_key: The stratified_split column name.
training_fraction: The training fraction.
validation_fraction: The validation fraction.
    test_fraction: The test fraction.
weight_column: The weight column name.
study_spec_parameters_override: The list for overriding study spec. The list
should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/study.proto#L181.
optimization_objective_recall_value: Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective
is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
    stage_1_tuner_worker_pool_specs_override: The dictionary for overriding
stage 1 tuner worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage
cv trainer worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional
model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
    additional_experiments: Use this field to configure private preview features.
dataflow_service_account: Custom service account to run dataflow jobs.
    max_selected_features: Number of features to select for training.
    apply_feature_selection_tuning: Whether to tune the feature selection rate.
run_evaluation: Whether to run evaluation in the training pipeline.
evaluation_batch_predict_machine_type: The prediction server machine type
for batch predict components during evaluation.
evaluation_batch_predict_starting_replica_count: The initial number of
      prediction servers for batch predict components during evaluation.
evaluation_batch_predict_max_replica_count: The max number of prediction
      servers for batch predict components during evaluation.
evaluation_batch_explain_machine_type: The prediction server machine type
for batch explain components during evaluation.
evaluation_batch_explain_starting_replica_count: The initial number of
      prediction servers for batch explain components during evaluation.
evaluation_batch_explain_max_replica_count: The max number of prediction
      servers for batch explain components during evaluation.
evaluation_dataflow_machine_type: The dataflow machine type for evaluation
components.
evaluation_dataflow_starting_num_workers: The initial number of Dataflow
workers for evaluation components.
evaluation_dataflow_max_num_workers: The max number of Dataflow workers for
evaluation components.
evaluation_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
evaluation components.
    run_distillation: Whether to run distillation in the training pipeline.
distill_batch_predict_machine_type: The prediction server machine type for
batch predict component in the model distillation.
distill_batch_predict_starting_replica_count: The initial number of
      prediction servers for the batch predict component in the model distillation.
    distill_batch_predict_max_replica_count: The max number of prediction servers
      for the batch predict component in the model distillation.
stage_1_tuning_result_artifact_uri: The stage 1 tuning result artifact GCS
URI.
quantiles: Quantiles to use for probabilistic inference. Up to 5 quantiles
are allowed of values between 0 and 1, exclusive. Represents the quantiles
to use for that objective. Quantiles must be unique.
enable_probabilistic_inference: If probabilistic inference is enabled, the
model will fit a distribution that captures the uncertainty of a
prediction. At inference time, the predictive distribution is used to make
a point prediction that minimizes the optimization objective. For example,
the mean of a predictive distribution is the point prediction that
minimizes RMSE loss. If quantiles are specified, then the quantiles of the
distribution are also returned.
num_selected_features: Number of selected features for feature selection,
defaults to None, in which case all features are used. If specified,
enable_probabilistic_inference and run_distillation cannot be enabled.
model_display_name: The display name of the uploaded Vertex model.
model_description: The description for the uploaded model.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
if not study_spec_parameters_override:
study_spec_parameters_override = []
if not stage_1_tuner_worker_pool_specs_override:
stage_1_tuner_worker_pool_specs_override = []
if not cv_trainer_worker_pool_specs_override:
cv_trainer_worker_pool_specs_override = []
if not quantiles:
quantiles = []
parameter_values = {}
parameters = {
'project': project,
'location': location,
'root_dir': root_dir,
'target_column': target_column,
'prediction_type': prediction_type,
'data_source_csv_filenames': data_source_csv_filenames,
'data_source_bigquery_table_path': data_source_bigquery_table_path,
'predefined_split_key': predefined_split_key,
'timestamp_split_key': timestamp_split_key,
'stratified_split_key': stratified_split_key,
'training_fraction': training_fraction,
'validation_fraction': validation_fraction,
'test_fraction': test_fraction,
'optimization_objective': optimization_objective,
'train_budget_milli_node_hours': train_budget_milli_node_hours,
'stage_1_num_parallel_trials': stage_1_num_parallel_trials,
'stage_2_num_parallel_trials': stage_2_num_parallel_trials,
'stage_2_num_selected_trials': stage_2_num_selected_trials,
'weight_column': weight_column,
'optimization_objective_recall_value': (
optimization_objective_recall_value
),
'optimization_objective_precision_value': (
optimization_objective_precision_value
),
'study_spec_parameters_override': study_spec_parameters_override,
'stage_1_tuner_worker_pool_specs_override': (
stage_1_tuner_worker_pool_specs_override
),
'cv_trainer_worker_pool_specs_override': (
cv_trainer_worker_pool_specs_override
),
'export_additional_model_without_custom_ops': (
export_additional_model_without_custom_ops
),
'dataflow_subnetwork': dataflow_subnetwork,
'dataflow_use_public_ips': dataflow_use_public_ips,
'dataflow_service_account': dataflow_service_account,
'encryption_spec_key_name': encryption_spec_key_name,
'max_selected_features': max_selected_features,
'stage_1_tuning_result_artifact_uri': stage_1_tuning_result_artifact_uri,
'quantiles': quantiles,
'enable_probabilistic_inference': enable_probabilistic_inference,
'model_display_name': model_display_name,
'model_description': model_description,
}
parameter_values.update(
{param: value for param, value in parameters.items() if value is not None}
)
if run_evaluation:
eval_parameters = {
'evaluation_batch_predict_machine_type': (
evaluation_batch_predict_machine_type
),
'evaluation_batch_predict_starting_replica_count': (
evaluation_batch_predict_starting_replica_count
),
'evaluation_batch_predict_max_replica_count': (
evaluation_batch_predict_max_replica_count
),
'evaluation_batch_explain_machine_type': (
evaluation_batch_explain_machine_type
),
'evaluation_batch_explain_starting_replica_count': (
evaluation_batch_explain_starting_replica_count
),
'evaluation_batch_explain_max_replica_count': (
evaluation_batch_explain_max_replica_count
),
'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,
'evaluation_dataflow_starting_num_workers': (
evaluation_dataflow_starting_num_workers
),
'evaluation_dataflow_max_num_workers': (
evaluation_dataflow_max_num_workers
),
'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,
'run_evaluation': run_evaluation,
}
parameter_values.update(
{
param: value
for param, value in eval_parameters.items()
if value is not None
}
)
# V1 pipeline without FTE
if num_selected_features is None:
if not additional_experiments:
additional_experiments = {}
parameters = {
'transformations': transformations,
'stats_and_example_gen_dataflow_machine_type': (
stats_and_example_gen_dataflow_machine_type
),
'stats_and_example_gen_dataflow_max_num_workers': (
stats_and_example_gen_dataflow_max_num_workers
),
'stats_and_example_gen_dataflow_disk_size_gb': (
stats_and_example_gen_dataflow_disk_size_gb
),
'transform_dataflow_machine_type': transform_dataflow_machine_type,
'transform_dataflow_max_num_workers': (
transform_dataflow_max_num_workers
),
'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,
'additional_experiments': additional_experiments,
}
parameter_values.update(
{
param: value
for param, value in parameters.items()
if value is not None
}
)
if apply_feature_selection_tuning:
parameter_values.update({
'apply_feature_selection_tuning': apply_feature_selection_tuning,
})
if run_distillation:
distillation_parameters = {
'distill_batch_predict_machine_type': (
distill_batch_predict_machine_type
),
'distill_batch_predict_starting_replica_count': (
distill_batch_predict_starting_replica_count
),
'distill_batch_predict_max_replica_count': (
distill_batch_predict_max_replica_count
),
'run_distillation': run_distillation,
}
parameter_values.update(
{
param: value
for param, value in distillation_parameters.items()
if value is not None
}
)
# V2 pipeline (with FTE)
else:
if run_distillation:
raise ValueError(
'Distillation is currently not supported'
' when num_selected_features is specified.'
)
parameters = {
'num_selected_features': num_selected_features,
'dataset_level_custom_transformation_definitions': [],
'dataset_level_transformations': [],
'tf_auto_transform_features': {},
'tf_custom_transformation_definitions': [],
'legacy_transformations_path': transformations,
'feature_transform_engine_dataflow_machine_type': (
transform_dataflow_machine_type
),
'feature_transform_engine_dataflow_max_num_workers': (
transform_dataflow_max_num_workers
),
'feature_transform_engine_dataflow_disk_size_gb': (
transform_dataflow_disk_size_gb
),
}
parameter_values.update(
{
param: value
for param, value in parameters.items()
if value is not None
}
)
return parameter_values
def get_automl_tabular_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column: str,
prediction_type: str,
optimization_objective: str,
transformations: str,
train_budget_milli_node_hours: float,
stage_1_num_parallel_trials: Optional[int] = None,
stage_2_num_parallel_trials: Optional[int] = None,
stage_2_num_selected_trials: Optional[int] = None,
data_source_csv_filenames: Optional[str] = None,
data_source_bigquery_table_path: Optional[str] = None,
predefined_split_key: Optional[str] = None,
timestamp_split_key: Optional[str] = None,
stratified_split_key: Optional[str] = None,
training_fraction: Optional[float] = None,
validation_fraction: Optional[float] = None,
test_fraction: Optional[float] = None,
weight_column: Optional[str] = None,
study_spec_parameters_override: Optional[List[Dict[str, Any]]] = None,
optimization_objective_recall_value: Optional[float] = None,
optimization_objective_precision_value: Optional[float] = None,
stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: Optional[str] = None,
stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,
stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,
transform_dataflow_machine_type: Optional[str] = None,
transform_dataflow_max_num_workers: Optional[int] = None,
transform_dataflow_disk_size_gb: Optional[int] = None,
dataflow_subnetwork: Optional[str] = None,
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: Optional[str] = None,
additional_experiments: Optional[Dict[str, Any]] = None,
dataflow_service_account: Optional[str] = None,
run_evaluation: bool = True,
evaluation_batch_predict_machine_type: Optional[str] = None,
evaluation_batch_predict_starting_replica_count: Optional[int] = None,
evaluation_batch_predict_max_replica_count: Optional[int] = None,
evaluation_batch_explain_machine_type: Optional[str] = None,
evaluation_batch_explain_starting_replica_count: Optional[int] = None,
evaluation_batch_explain_max_replica_count: Optional[int] = None,
evaluation_dataflow_machine_type: Optional[str] = None,
evaluation_dataflow_starting_num_workers: Optional[int] = None,
evaluation_dataflow_max_num_workers: Optional[int] = None,
evaluation_dataflow_disk_size_gb: Optional[int] = None,
run_distillation: bool = False,
distill_batch_predict_machine_type: Optional[str] = None,
distill_batch_predict_starting_replica_count: Optional[int] = None,
distill_batch_predict_max_replica_count: Optional[int] = None,
stage_1_tuning_result_artifact_uri: Optional[str] = None,
quantiles: Optional[List[float]] = None,
enable_probabilistic_inference: bool = False,
num_selected_features: Optional[int] = None,
model_display_name: str = '',
model_description: str = '',
) -> Tuple[str, Dict[str, Any]]:
# fmt: off
"""Get the AutoML Tabular v1 default training pipeline.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column: The target column name.
prediction_type: The type of prediction the model is to produce. "classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc", "minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or "maximize-recall-at-precision". For multi class classification, "minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or "minimize-rmsle".
transformations: The path to a GCS file containing the transformations to apply.
train_budget_milli_node_hours: The train budget of creating this model, expressed in milli node hours, i.e. a value of 1,000 in this field means 1 node hour.
stage_1_num_parallel_trials: Number of parallel trials for stage 1.
stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
data_source_csv_filenames: The CSV data source.
data_source_bigquery_table_path: The BigQuery data source.
predefined_split_key: The predefined_split column name.
timestamp_split_key: The timestamp_split column name.
stratified_split_key: The stratified_split column name.
training_fraction: The training fraction.
validation_fraction: The validation fraction.
test_fraction: The test fraction.
weight_column: The weight column name.
study_spec_parameters_override: The list for overriding study spec. The list should be of format: https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/study.proto#L181.
optimization_objective_recall_value: Required when optimization_objective is "maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
stage_1_tuner_worker_pool_specs_override: The dictionary for overriding stage 1 tuner worker pool spec. The dictionary should be of format: https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage cv trainer worker pool spec. The dictionary should be of format: https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: The KMS key name.
additional_experiments: Use this field to configure private preview features.
dataflow_service_account: Custom service account to run dataflow jobs.
run_evaluation: Whether to run evaluation in the training pipeline.
evaluation_batch_predict_machine_type: The prediction server machine type for batch predict components during evaluation.
evaluation_batch_predict_starting_replica_count: The initial number of prediction servers for batch predict components during evaluation.
evaluation_batch_predict_max_replica_count: The max number of prediction servers for batch predict components during evaluation.
evaluation_batch_explain_machine_type: The prediction server machine type for batch explain components during evaluation.
evaluation_batch_explain_starting_replica_count: The initial number of prediction servers for batch explain components during evaluation.
evaluation_batch_explain_max_replica_count: The max number of prediction servers for batch explain components during evaluation.
evaluation_dataflow_machine_type: The dataflow machine type for evaluation components.
evaluation_dataflow_starting_num_workers: The initial number of Dataflow workers for evaluation components.
evaluation_dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
evaluation_dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
run_distillation: Whether to run distillation in the training pipeline.
distill_batch_predict_machine_type: The prediction server machine type for batch predict component in the model distillation.
distill_batch_predict_starting_replica_count: The initial number of prediction servers for batch predict component in the model distillation.
distill_batch_predict_max_replica_count: The max number of prediction servers for batch predict component in the model distillation.
stage_1_tuning_result_artifact_uri: The stage 1 tuning result artifact GCS URI.
quantiles: Quantiles to use for probabilistic inference. Up to 5 quantiles are allowed, with values between 0 and 1, exclusive. Represents the quantiles to use for that objective. Quantiles must be unique.
enable_probabilistic_inference: If probabilistic inference is enabled, the model will fit a distribution that captures the uncertainty of a prediction. At inference time, the predictive distribution is used to make a point prediction that minimizes the optimization objective. For example, the mean of a predictive distribution is the point prediction that minimizes RMSE loss. If quantiles are specified, then the quantiles of the distribution are also returned.
num_selected_features: Number of selected features for feature selection, defaults to None, in which case all features are used.
model_display_name: The display name of the uploaded Vertex model.
model_description: The description for the uploaded model.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
# fmt: on
parameter_values = _get_default_pipeline_params(
project=project,
location=location,
root_dir=root_dir,
target_column=target_column,
prediction_type=prediction_type,
optimization_objective=optimization_objective,
transformations=transformations,
train_budget_milli_node_hours=train_budget_milli_node_hours,
stage_1_num_parallel_trials=stage_1_num_parallel_trials,
stage_2_num_parallel_trials=stage_2_num_parallel_trials,
stage_2_num_selected_trials=stage_2_num_selected_trials,
data_source_csv_filenames=data_source_csv_filenames,
data_source_bigquery_table_path=data_source_bigquery_table_path,
predefined_split_key=predefined_split_key,
timestamp_split_key=timestamp_split_key,
stratified_split_key=stratified_split_key,
training_fraction=training_fraction,
validation_fraction=validation_fraction,
test_fraction=test_fraction,
weight_column=weight_column,
study_spec_parameters_override=study_spec_parameters_override,
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
stage_1_tuner_worker_pool_specs_override=stage_1_tuner_worker_pool_specs_override,
cv_trainer_worker_pool_specs_override=cv_trainer_worker_pool_specs_override,
export_additional_model_without_custom_ops=export_additional_model_without_custom_ops,
stats_and_example_gen_dataflow_machine_type=stats_and_example_gen_dataflow_machine_type,
stats_and_example_gen_dataflow_max_num_workers=stats_and_example_gen_dataflow_max_num_workers,
stats_and_example_gen_dataflow_disk_size_gb=stats_and_example_gen_dataflow_disk_size_gb,
transform_dataflow_machine_type=transform_dataflow_machine_type,
transform_dataflow_max_num_workers=transform_dataflow_max_num_workers,
transform_dataflow_disk_size_gb=transform_dataflow_disk_size_gb,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
additional_experiments=additional_experiments,
dataflow_service_account=dataflow_service_account,
run_evaluation=run_evaluation,
evaluation_batch_predict_machine_type=evaluation_batch_predict_machine_type,
evaluation_batch_predict_starting_replica_count=evaluation_batch_predict_starting_replica_count,
evaluation_batch_predict_max_replica_count=evaluation_batch_predict_max_replica_count,
evaluation_batch_explain_machine_type=evaluation_batch_explain_machine_type,
evaluation_batch_explain_starting_replica_count=evaluation_batch_explain_starting_replica_count,
evaluation_batch_explain_max_replica_count=evaluation_batch_explain_max_replica_count,
evaluation_dataflow_machine_type=evaluation_dataflow_machine_type,
evaluation_dataflow_starting_num_workers=evaluation_dataflow_starting_num_workers,
evaluation_dataflow_max_num_workers=evaluation_dataflow_max_num_workers,
evaluation_dataflow_disk_size_gb=evaluation_dataflow_disk_size_gb,
run_distillation=run_distillation,
distill_batch_predict_machine_type=distill_batch_predict_machine_type,
distill_batch_predict_starting_replica_count=distill_batch_predict_starting_replica_count,
distill_batch_predict_max_replica_count=distill_batch_predict_max_replica_count,
stage_1_tuning_result_artifact_uri=stage_1_tuning_result_artifact_uri,
quantiles=quantiles,
enable_probabilistic_inference=enable_probabilistic_inference,
num_selected_features=num_selected_features,
model_display_name=model_display_name,
model_description=model_description,
)
# V1 pipeline without FTE
if num_selected_features is None:
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(), 'automl_tabular_pipeline.yaml'
)
# V2 pipeline with FTE
else:
pipeline_definition_path = os.path.join(
_GCPC_PREVIEW_TABULAR_PATH,
'automl_tabular_v2_pipeline.yaml',
)
# V2 pipeline requires execution engine to be set.
if 'tf_transform_execution_engine' not in parameter_values:
parameter_values['tf_transform_execution_engine'] = 'dataflow'
return pipeline_definition_path, parameter_values
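# Illustrative usage (not part of this module): the returned template path and
# parameter values are typically handed to a Vertex AI PipelineJob. The
# project, bucket, and column names below are placeholders.
#
#   from google.cloud import aiplatform
#
#   template_path, parameter_values = get_automl_tabular_pipeline_and_parameters(
#       project='my-project',
#       location='us-central1',
#       root_dir='gs://my-bucket/pipeline_root',
#       target_column='label',
#       prediction_type='classification',
#       optimization_objective='maximize-au-prc',
#       transformations='gs://my-bucket/transformations.json',
#       train_budget_milli_node_hours=1000,
#       data_source_bigquery_table_path='bq://my-project.dataset.table',
#       training_fraction=0.8,
#       validation_fraction=0.1,
#       test_fraction=0.1,
#   )
#   aiplatform.PipelineJob(
#       display_name='automl-tabular',
#       template_path=template_path,
#       parameter_values=parameter_values,
#       pipeline_root='gs://my-bucket/pipeline_root',
#   ).run()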
def input_dictionary_to_parameter(input_dict: Optional[Dict[str, Any]]) -> str:
"""Convert json input dict to encoded parameter string.
This function is required due to a limitation of YAML component definitions:
YAML has no keyword for applying quote escaping, so the JSON argument's
quotes must be manually escaped using this function.
Args:
input_dict: The input json dictionary.
Returns:
The encoded string used for parameter.
"""
if not input_dict:
return ''
out = json.dumps(json.dumps(input_dict))
return out[1:-1] # remove the outside quotes, e.g., "foo" -> foo
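# Illustrative example (assuming standard json.dumps behavior); the dictionary
# below is a placeholder:
#
#   input_dictionary_to_parameter({'key': 'value'})
#   # -> '{\"key\": \"value\"}'  (inner quotes escaped so the value survives
#   #    substitution into a YAML component definition)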
def get_skip_evaluation_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column_name: str,
prediction_type: str,
optimization_objective: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
train_budget_milli_node_hours: float,
stage_1_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_selected_trials: int = _DEFAULT_STAGE_2_NUM_SELECTED_TRAILS,
weight_column_name: str = '',
study_spec_override: Optional[Dict[str, Any]] = None,
optimization_objective_recall_value: float = -1,
optimization_objective_precision_value: float = -1,
stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
stats_and_example_gen_dataflow_disk_size_gb: int = 40,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
transform_dataflow_disk_size_gb: int = 40,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
additional_experiments: Optional[Dict[str, Any]] = None,
) -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular training pipeline that skips evaluation.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column_name: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
train_budget_milli_node_hours: The train budget of creating this model,
expressed in milli node hours, i.e. a value of 1,000 in this field means 1
node hour.
stage_1_num_parallel_trials: Number of parallel trials for stage 1.
stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
weight_column_name: The weight column name.
study_spec_override: The dictionary for overriding study spec. The
dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/study.proto#L181.
optimization_objective_recall_value: Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective
is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
stage_1_tuner_worker_pool_specs_override: The dictionary for overriding
stage 1 tuner worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage
cv trainer worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional
model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
additional_experiments: Use this field to configure private preview features.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
return get_default_pipeline_and_parameters(
project=project,
location=location,
root_dir=root_dir,
target_column_name=target_column_name,
prediction_type=prediction_type,
optimization_objective=optimization_objective,
transformations=transformations,
split_spec=split_spec,
data_source=data_source,
train_budget_milli_node_hours=train_budget_milli_node_hours,
stage_1_num_parallel_trials=stage_1_num_parallel_trials,
stage_2_num_parallel_trials=stage_2_num_parallel_trials,
stage_2_num_selected_trials=stage_2_num_selected_trials,
weight_column_name=weight_column_name,
study_spec_override=study_spec_override,
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
stage_1_tuner_worker_pool_specs_override=stage_1_tuner_worker_pool_specs_override,
cv_trainer_worker_pool_specs_override=cv_trainer_worker_pool_specs_override,
export_additional_model_without_custom_ops=export_additional_model_without_custom_ops,
stats_and_example_gen_dataflow_machine_type=stats_and_example_gen_dataflow_machine_type,
stats_and_example_gen_dataflow_max_num_workers=stats_and_example_gen_dataflow_max_num_workers,
stats_and_example_gen_dataflow_disk_size_gb=stats_and_example_gen_dataflow_disk_size_gb,
transform_dataflow_machine_type=transform_dataflow_machine_type,
transform_dataflow_max_num_workers=transform_dataflow_max_num_workers,
transform_dataflow_disk_size_gb=transform_dataflow_disk_size_gb,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
additional_experiments=additional_experiments,
run_evaluation=False,
run_distillation=False,
)
def get_default_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column_name: str,
prediction_type: str,
optimization_objective: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
train_budget_milli_node_hours: float,
stage_1_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_selected_trials: int = _DEFAULT_STAGE_2_NUM_SELECTED_TRAILS,
weight_column_name: str = '',
study_spec_override: Optional[Dict[str, Any]] = None,
optimization_objective_recall_value: float = -1,
optimization_objective_precision_value: float = -1,
stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
stats_and_example_gen_dataflow_disk_size_gb: int = 40,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
transform_dataflow_disk_size_gb: int = 40,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
additional_experiments: Optional[Dict[str, Any]] = None,
dataflow_service_account: str = '',
run_evaluation: bool = True,
evaluation_batch_predict_machine_type: str = _EVALUATION_BATCH_PREDICT_MACHINE_TYPE,
evaluation_batch_predict_starting_replica_count: int = _EVALUATION_BATCH_PREDICT_STARTING_REPLICA_COUNT,
evaluation_batch_predict_max_replica_count: int = _EVALUATION_BATCH_PREDICT_MAX_REPLICA_COUNT,
evaluation_dataflow_machine_type: str = _EVALUATION_DATAFLOW_MACHINE_TYPE,
evaluation_dataflow_max_num_workers: int = _EVALUATION_DATAFLOW_MAX_NUM_WORKERS,
evaluation_dataflow_disk_size_gb: int = _EVALUATION_DATAFLOW_DISK_SIZE_GB,
run_distillation: bool = False,
distill_batch_predict_machine_type: str = 'n1-standard-16',
distill_batch_predict_starting_replica_count: int = 25,
distill_batch_predict_max_replica_count: int = 25,
) -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular default training pipeline.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column_name: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
train_budget_milli_node_hours: The train budget of creating this model,
expressed in milli node hours, i.e. a value of 1,000 in this field means 1
node hour.
stage_1_num_parallel_trials: Number of parallel trials for stage 1.
stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
weight_column_name: The weight column name.
study_spec_override: The dictionary for overriding study spec. The
dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/study.proto#L181.
optimization_objective_recall_value: Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective
is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
stage_1_tuner_worker_pool_specs_override: The dictionary for overriding
stage 1 tuner worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage
cv trainer worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional
model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
additional_experiments: Use this field to configure private preview features.
dataflow_service_account: Custom service account to run dataflow jobs.
run_evaluation: Whether to run evaluation in the training pipeline.
evaluation_batch_predict_machine_type: The prediction server machine type
for batch predict components during evaluation.
evaluation_batch_predict_starting_replica_count: The initial number of
prediction servers for batch predict components during evaluation.
evaluation_batch_predict_max_replica_count: The max number of prediction
servers for batch predict components during evaluation.
evaluation_dataflow_machine_type: The dataflow machine type for evaluation
components.
evaluation_dataflow_max_num_workers: The max number of Dataflow workers for
evaluation components.
evaluation_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
evaluation components.
run_distillation: Whether to run distillation in the training pipeline.
distill_batch_predict_machine_type: The prediction server machine type for
batch predict component in the model distillation.
distill_batch_predict_starting_replica_count: The initial number of
prediction servers for batch predict component in the model distillation.
distill_batch_predict_max_replica_count: The max number of prediction
servers for batch predict component in the model distillation.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
warnings.warn(
'This method is deprecated,'
' please use get_automl_tabular_pipeline_and_parameters instead.'
)
if stage_1_num_parallel_trials <= 0:
stage_1_num_parallel_trials = _DEFAULT_NUM_PARALLEL_TRAILS
if stage_2_num_parallel_trials <= 0:
stage_2_num_parallel_trials = _DEFAULT_NUM_PARALLEL_TRAILS
hours = float(train_budget_milli_node_hours) / 1000.0
multiplier = stage_1_num_parallel_trials * hours / 500.0
stage_1_single_run_max_secs = int(math.sqrt(multiplier) * 2400.0)
phase_2_rounds = int(
math.sqrt(multiplier) * 100 / stage_2_num_parallel_trials + 0.5
)
if phase_2_rounds < 1:
phase_2_rounds = 1
# The magic number 1.3 below accounts for trials that don't always finish
# within time_per_trial; it is an empirical safety margin.
stage_1_deadline_secs = int(
hours * 3600.0 - 1.3 * stage_1_single_run_max_secs * phase_2_rounds
)
if stage_1_deadline_secs < hours * 3600.0 * 0.5:
stage_1_deadline_secs = int(hours * 3600.0 * 0.5)
# Phase 1 deadline is the same as phase 2 deadline in this case. Phase 2
# can't finish in time after the deadline is cut, so adjust the time per
# trial to meet the deadline.
stage_1_single_run_max_secs = int(
stage_1_deadline_secs / (1.3 * phase_2_rounds)
)
reduce_search_space_mode = 'minimal'
if multiplier > 2:
reduce_search_space_mode = 'regular'
if multiplier > 4:
reduce_search_space_mode = 'full'
# Stage 2 number of trials is stage_1_num_selected_trials *
# _NUM_FOLDS, which should be equal to phase_2_rounds *
# stage_2_num_parallel_trials. Use this information to calculate
# stage_1_num_selected_trials:
stage_1_num_selected_trials = int(
phase_2_rounds * stage_2_num_parallel_trials / _NUM_FOLDS
)
stage_1_deadline_hours = stage_1_deadline_secs / 3600.0
stage_2_deadline_hours = hours - stage_1_deadline_hours
stage_2_single_run_max_secs = stage_1_single_run_max_secs
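# Worked example (illustrative numbers, not defaults): with
# train_budget_milli_node_hours=1000 (one node hour) and
# stage_1_num_parallel_trials=35, multiplier = 35 * 1.0 / 500 = 0.07, so
# stage_1_single_run_max_secs = int(sqrt(0.07) * 2400) = 634 and
# phase_2_rounds = int(sqrt(0.07) * 100 / 35 + 0.5) = 1. The stage 1 deadline
# is then int(3600 - 1.3 * 634) = 2775 seconds (~0.77 hours), leaving roughly
# 0.23 hours for stage 2.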
parameter_values = {
'project': project,
'location': location,
'root_dir': root_dir,
'target_column_name': target_column_name,
'prediction_type': prediction_type,
'optimization_objective': optimization_objective,
'transformations': input_dictionary_to_parameter(transformations),
'split_spec': input_dictionary_to_parameter(split_spec),
'data_source': input_dictionary_to_parameter(data_source),
'stage_1_deadline_hours': stage_1_deadline_hours,
'stage_1_num_parallel_trials': stage_1_num_parallel_trials,
'stage_1_num_selected_trials': stage_1_num_selected_trials,
'stage_1_single_run_max_secs': stage_1_single_run_max_secs,
'reduce_search_space_mode': reduce_search_space_mode,
'stage_2_deadline_hours': stage_2_deadline_hours,
'stage_2_num_parallel_trials': stage_2_num_parallel_trials,
'stage_2_num_selected_trials': stage_2_num_selected_trials,
'stage_2_single_run_max_secs': stage_2_single_run_max_secs,
'weight_column_name': weight_column_name,
'optimization_objective_recall_value': (
optimization_objective_recall_value
),
'optimization_objective_precision_value': (
optimization_objective_precision_value
),
'study_spec_override': input_dictionary_to_parameter(study_spec_override),
'stage_1_tuner_worker_pool_specs_override': input_dictionary_to_parameter(
stage_1_tuner_worker_pool_specs_override
),
'cv_trainer_worker_pool_specs_override': input_dictionary_to_parameter(
cv_trainer_worker_pool_specs_override
),
'export_additional_model_without_custom_ops': (
export_additional_model_without_custom_ops
),
'stats_and_example_gen_dataflow_machine_type': (
stats_and_example_gen_dataflow_machine_type
),
'stats_and_example_gen_dataflow_max_num_workers': (
stats_and_example_gen_dataflow_max_num_workers
),
'stats_and_example_gen_dataflow_disk_size_gb': (
stats_and_example_gen_dataflow_disk_size_gb
),
'transform_dataflow_machine_type': transform_dataflow_machine_type,
'transform_dataflow_max_num_workers': transform_dataflow_max_num_workers,
'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,
'dataflow_subnetwork': dataflow_subnetwork,
'dataflow_use_public_ips': dataflow_use_public_ips,
'encryption_spec_key_name': encryption_spec_key_name,
}
if additional_experiments:
parameter_values.update(
{
'additional_experiments': input_dictionary_to_parameter(
additional_experiments
)
}
)
if run_evaluation:
parameter_values.update({
'dataflow_service_account': dataflow_service_account,
'evaluation_batch_predict_machine_type': (
evaluation_batch_predict_machine_type
),
'evaluation_batch_predict_starting_replica_count': (
evaluation_batch_predict_starting_replica_count
),
'evaluation_batch_predict_max_replica_count': (
evaluation_batch_predict_max_replica_count
),
'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,
'evaluation_dataflow_max_num_workers': (
evaluation_dataflow_max_num_workers
),
'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,
'run_evaluation': run_evaluation,
})
if run_distillation:
# The magic number 1.3 below accounts for trials that don't always finish
# within time_per_trial; it is an empirical safety margin.
distill_stage_1_deadline_hours = (
math.ceil(
float(_DISTILL_TOTAL_TRIALS)
/ parameter_values['stage_1_num_parallel_trials']
)
* parameter_values['stage_1_single_run_max_secs']
* 1.3
/ 3600.0
)
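# Illustrative: if _DISTILL_TOTAL_TRIALS were 100, with 35 parallel trials and
# a 634-second single-run budget this evaluates to
# ceil(100 / 35) * 634 * 1.3 / 3600.0, roughly 0.69 hours. The numbers are
# placeholders chosen for the example, not the module's defaults.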
parameter_values.update({
'distill_stage_1_deadline_hours': distill_stage_1_deadline_hours,
'distill_batch_predict_machine_type': (
distill_batch_predict_machine_type
),
'distill_batch_predict_starting_replica_count': (
distill_batch_predict_starting_replica_count
),
'distill_batch_predict_max_replica_count': (
distill_batch_predict_max_replica_count
),
'run_distillation': run_distillation,
})
pipeline_definition_path = os.path.join(
pathlib.Path(__file__).parent.resolve(),
'deprecated/default_pipeline.json',
)
return pipeline_definition_path, parameter_values
def get_skip_architecture_search_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column: str,
prediction_type: str,
optimization_objective: str,
transformations: str,
train_budget_milli_node_hours: float,
stage_1_tuning_result_artifact_uri: str,
stage_2_num_parallel_trials: Optional[int] = None,
stage_2_num_selected_trials: Optional[int] = None,
data_source_csv_filenames: Optional[str] = None,
data_source_bigquery_table_path: Optional[str] = None,
predefined_split_key: Optional[str] = None,
timestamp_split_key: Optional[str] = None,
stratified_split_key: Optional[str] = None,
training_fraction: Optional[float] = None,
validation_fraction: Optional[float] = None,
test_fraction: Optional[float] = None,
weight_column: Optional[str] = None,
optimization_objective_recall_value: Optional[float] = None,
optimization_objective_precision_value: Optional[float] = None,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: Optional[str] = None,
stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,
stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,
transform_dataflow_machine_type: Optional[str] = None,
transform_dataflow_max_num_workers: Optional[int] = None,
transform_dataflow_disk_size_gb: Optional[int] = None,
dataflow_subnetwork: Optional[str] = None,
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: Optional[str] = None,
additional_experiments: Optional[Dict[str, Any]] = None,
dataflow_service_account: Optional[str] = None,
run_evaluation: bool = True,
evaluation_batch_predict_machine_type: Optional[str] = None,
evaluation_batch_predict_starting_replica_count: Optional[int] = None,
evaluation_batch_predict_max_replica_count: Optional[int] = None,
evaluation_batch_explain_machine_type: Optional[str] = None,
evaluation_batch_explain_starting_replica_count: Optional[int] = None,
evaluation_batch_explain_max_replica_count: Optional[int] = None,
evaluation_dataflow_machine_type: Optional[str] = None,
evaluation_dataflow_starting_num_workers: Optional[int] = None,
evaluation_dataflow_max_num_workers: Optional[int] = None,
evaluation_dataflow_disk_size_gb: Optional[int] = None,
) -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular training pipeline that skips architecture search.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".
transformations: The transformations to apply.
train_budget_milli_node_hours: The train budget of creating this model,
expressed in milli node hours, i.e. a value of 1,000 in this field means 1
node hour.
stage_1_tuning_result_artifact_uri: The stage 1 tuning result artifact GCS
URI.
stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
data_source_csv_filenames: The CSV data source.
data_source_bigquery_table_path: The BigQuery data source.
predefined_split_key: The predefined_split column name.
timestamp_split_key: The timestamp_split column name.
stratified_split_key: The stratified_split column name.
training_fraction: The training fraction.
validation_fraction: The validation fraction.
test_fraction: The test fraction.
weight_column: The weight column name.
optimization_objective_recall_value: Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective
is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage
cv trainer worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional
model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
additional_experiments: Use this field to configure private preview features.
dataflow_service_account: Custom service account to run dataflow jobs.
run_evaluation: Whether to run evaluation in the training pipeline.
evaluation_batch_predict_machine_type: The prediction server machine type
for batch predict components during evaluation.
evaluation_batch_predict_starting_replica_count: The initial number of
prediction servers for batch predict components during evaluation.
evaluation_batch_predict_max_replica_count: The max number of prediction
servers for batch predict components during evaluation.
evaluation_batch_explain_machine_type: The prediction server machine type
for batch explain components during evaluation.
evaluation_batch_explain_starting_replica_count: The initial number of
prediction servers for batch explain components during evaluation.
evaluation_batch_explain_max_replica_count: The max number of prediction
servers for batch explain components during evaluation.
evaluation_dataflow_machine_type: The dataflow machine type for evaluation
components.
evaluation_dataflow_starting_num_workers: The initial number of Dataflow
workers for evaluation components.
evaluation_dataflow_max_num_workers: The max number of Dataflow workers for
evaluation components.
evaluation_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
evaluation components.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
return get_automl_tabular_pipeline_and_parameters(
project=project,
location=location,
root_dir=root_dir,
target_column=target_column,
prediction_type=prediction_type,
optimization_objective=optimization_objective,
transformations=transformations,
train_budget_milli_node_hours=train_budget_milli_node_hours,
stage_1_num_parallel_trials=None,
stage_2_num_parallel_trials=stage_2_num_parallel_trials,
stage_2_num_selected_trials=stage_2_num_selected_trials,
data_source_csv_filenames=data_source_csv_filenames,
data_source_bigquery_table_path=data_source_bigquery_table_path,
predefined_split_key=predefined_split_key,
timestamp_split_key=timestamp_split_key,
stratified_split_key=stratified_split_key,
training_fraction=training_fraction,
validation_fraction=validation_fraction,
test_fraction=test_fraction,
weight_column=weight_column,
study_spec_parameters_override=[],
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
stage_1_tuner_worker_pool_specs_override={},
cv_trainer_worker_pool_specs_override=cv_trainer_worker_pool_specs_override,
export_additional_model_without_custom_ops=export_additional_model_without_custom_ops,
stats_and_example_gen_dataflow_machine_type=stats_and_example_gen_dataflow_machine_type,
stats_and_example_gen_dataflow_max_num_workers=stats_and_example_gen_dataflow_max_num_workers,
stats_and_example_gen_dataflow_disk_size_gb=stats_and_example_gen_dataflow_disk_size_gb,
transform_dataflow_machine_type=transform_dataflow_machine_type,
transform_dataflow_max_num_workers=transform_dataflow_max_num_workers,
transform_dataflow_disk_size_gb=transform_dataflow_disk_size_gb,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
additional_experiments=additional_experiments,
dataflow_service_account=dataflow_service_account,
run_evaluation=run_evaluation,
evaluation_batch_predict_machine_type=evaluation_batch_predict_machine_type,
evaluation_batch_predict_starting_replica_count=evaluation_batch_predict_starting_replica_count,
evaluation_batch_predict_max_replica_count=evaluation_batch_predict_max_replica_count,
evaluation_batch_explain_machine_type=evaluation_batch_explain_machine_type,
evaluation_batch_explain_starting_replica_count=evaluation_batch_explain_starting_replica_count,
evaluation_batch_explain_max_replica_count=evaluation_batch_explain_max_replica_count,
evaluation_dataflow_machine_type=evaluation_dataflow_machine_type,
evaluation_dataflow_starting_num_workers=evaluation_dataflow_starting_num_workers,
evaluation_dataflow_max_num_workers=evaluation_dataflow_max_num_workers,
evaluation_dataflow_disk_size_gb=evaluation_dataflow_disk_size_gb,
run_distillation=None,
distill_batch_predict_machine_type=None,
distill_batch_predict_starting_replica_count=None,
distill_batch_predict_max_replica_count=None,
stage_1_tuning_result_artifact_uri=stage_1_tuning_result_artifact_uri,
quantiles=[],
enable_probabilistic_inference=False,
)
def get_distill_skip_evaluation_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
target_column_name: str,
prediction_type: str,
optimization_objective: str,
transformations: Dict[str, Any],
split_spec: Dict[str, Any],
data_source: Dict[str, Any],
train_budget_milli_node_hours: float,
stage_1_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_parallel_trials: int = _DEFAULT_NUM_PARALLEL_TRAILS,
stage_2_num_selected_trials: int = _DEFAULT_STAGE_2_NUM_SELECTED_TRAILS,
weight_column_name: str = '',
study_spec_override: Optional[Dict[str, Any]] = None,
optimization_objective_recall_value: float = -1,
optimization_objective_precision_value: float = -1,
stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,
export_additional_model_without_custom_ops: bool = False,
stats_and_example_gen_dataflow_machine_type: str = 'n1-standard-16',
stats_and_example_gen_dataflow_max_num_workers: int = 25,
stats_and_example_gen_dataflow_disk_size_gb: int = 40,
transform_dataflow_machine_type: str = 'n1-standard-16',
transform_dataflow_max_num_workers: int = 25,
transform_dataflow_disk_size_gb: int = 40,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
additional_experiments: Optional[Dict[str, Any]] = None,
distill_batch_predict_machine_type: str = 'n1-standard-16',
distill_batch_predict_starting_replica_count: int = 25,
distill_batch_predict_max_replica_count: int = 25,
) -> Tuple[str, Dict[str, Any]]:
"""Get the AutoML Tabular training pipeline that distill and skips evaluation.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
root_dir: The root GCS directory for the pipeline components.
target_column_name: The target column name.
prediction_type: The type of prediction the model is to produce.
"classification" or "regression".
optimization_objective: For binary classification, "maximize-au-roc",
"minimize-log-loss", "maximize-au-prc", "maximize-precision-at-recall", or
"maximize-recall-at-precision". For multi class classification,
"minimize-log-loss". For regression, "minimize-rmse", "minimize-mae", or
"minimize-rmsle".
transformations: The transformations to apply.
split_spec: The split spec.
data_source: The data source.
train_budget_milli_node_hours: The train budget of creating this model,
expressed in milli node hours, i.e. a value of 1,000 in this field means 1
node hour.
stage_1_num_parallel_trials: Number of parallel trials for stage 1.
stage_2_num_parallel_trials: Number of parallel trials for stage 2.
stage_2_num_selected_trials: Number of selected trials for stage 2.
weight_column_name: The weight column name.
study_spec_override: The dictionary for overriding study spec. The
dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/study.proto#L181.
optimization_objective_recall_value: Required when optimization_objective is
"maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective
is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
stage_1_tuner_worker_pool_specs_override: The dictionary for overriding
stage 1 tuner worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
cv_trainer_worker_pool_specs_override: The dictionary for overriding stage
cv trainer worker pool spec. The dictionary should be of format
https://github.com/googleapis/googleapis/blob/4e836c7c257e3e20b1de14d470993a2b1f4736a8/google/cloud/aiplatform/v1beta1/custom_job.proto#L172.
export_additional_model_without_custom_ops: Whether to export additional
model without custom TensorFlow operators.
stats_and_example_gen_dataflow_machine_type: The dataflow machine type for
stats_and_example_gen component.
stats_and_example_gen_dataflow_max_num_workers: The max number of Dataflow
workers for stats_and_example_gen component.
stats_and_example_gen_dataflow_disk_size_gb: Dataflow worker's disk size in
GB for stats_and_example_gen component.
transform_dataflow_machine_type: The dataflow machine type for transform
component.
transform_dataflow_max_num_workers: The max number of Dataflow workers for
transform component.
transform_dataflow_disk_size_gb: Dataflow worker's disk size in GB for
transform component.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty
the default subnetwork will be used. Example:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP
addresses.
encryption_spec_key_name: The KMS key name.
additional_experiments: Use this field to configure private preview features.
distill_batch_predict_machine_type: The prediction server machine type for
batch predict component in the model distillation.
distill_batch_predict_starting_replica_count: The initial number of
prediction servers for batch predict component in the model distillation.
distill_batch_predict_max_replica_count: The max number of prediction
servers for batch predict component in the model distillation.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
warnings.warn(
'Deprecated. Please use get_automl_tabular_pipeline_and_parameters.'
)
return get_default_pipeline_and_parameters(
project=project,
location=location,
root_dir=root_dir,
target_column_name=target_column_name,
prediction_type=prediction_type,
optimization_objective=optimization_objective,
transformations=transformations,
split_spec=split_spec,
data_source=data_source,
train_budget_milli_node_hours=train_budget_milli_node_hours,
stage_1_num_parallel_trials=stage_1_num_parallel_trials,
stage_2_num_parallel_trials=stage_2_num_parallel_trials,
stage_2_num_selected_trials=stage_2_num_selected_trials,
weight_column_name=weight_column_name,
study_spec_override=study_spec_override,
optimization_objective_recall_value=optimization_objective_recall_value,
optimization_objective_precision_value=optimization_objective_precision_value,
stage_1_tuner_worker_pool_specs_override=stage_1_tuner_worker_pool_specs_override,
cv_trainer_worker_pool_specs_override=cv_trainer_worker_pool_specs_override,
export_additional_model_without_custom_ops=export_additional_model_without_custom_ops,
stats_and_example_gen_dataflow_machine_type=stats_and_example_gen_dataflow_machine_type,
stats_and_example_gen_dataflow_max_num_workers=stats_and_example_gen_dataflow_max_num_workers,
stats_and_example_gen_dataflow_disk_size_gb=stats_and_example_gen_dataflow_disk_size_gb,
transform_dataflow_machine_type=transform_dataflow_machine_type,
transform_dataflow_max_num_workers=transform_dataflow_max_num_workers,
transform_dataflow_disk_size_gb=transform_dataflow_disk_size_gb,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
additional_experiments=additional_experiments,
distill_batch_predict_machine_type=distill_batch_predict_machine_type,
distill_batch_predict_starting_replica_count=distill_batch_predict_starting_replica_count,
distill_batch_predict_max_replica_count=distill_batch_predict_max_replica_count,
run_evaluation=False,
run_distillation=True,
)
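# Illustrative usage sketch (the helper name, project, and bucket below are
# hypothetical placeholders, not part of this module): the returned
# (pipeline_definition_path, parameter_values) tuple is typically handed to the
# Vertex AI Pipelines client, for example:
#
#   from google.cloud import aiplatform
#
#   template_path, parameter_values = get_pipeline_and_parameters(...)
#   job = aiplatform.PipelineJob(
#       display_name='automl-tabular-distillation',
#       template_path=template_path,
#       parameter_values=parameter_values,
#       pipeline_root='gs://my-bucket/pipeline-root',  # hypothetical bucket
#   )
#   job.run()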
| 817 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/stats_and_example_gen.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Stats and Example Generation component spec."""
from typing import Optional
from kfp import dsl
from kfp.dsl import Artifact
from kfp.dsl import Dataset
from kfp.dsl import Output
@dsl.container_component
def tabular_stats_and_example_gen(
project: str,
location: str,
root_dir: str,
target_column_name: str,
prediction_type: str,
transformations: str,
dataset_schema: Output[Artifact],
dataset_stats: Output[Artifact],
train_split: Output[Dataset],
eval_split: Output[Dataset],
test_split: Output[Dataset],
test_split_json: dsl.OutputPath(list),
downsampled_test_split_json: dsl.OutputPath(list),
instance_baseline: Output[Artifact],
metadata: Output[Artifact],
gcp_resources: dsl.OutputPath(str),
weight_column_name: Optional[str] = '',
optimization_objective: Optional[str] = '',
optimization_objective_recall_value: Optional[float] = -1,
optimization_objective_precision_value: Optional[float] = -1,
transformations_path: Optional[str] = '',
request_type: Optional[str] = 'COLUMN_STATS_ONLY',
dataflow_machine_type: Optional[str] = 'n1-standard-16',
dataflow_max_num_workers: Optional[int] = 25,
dataflow_disk_size_gb: Optional[int] = 40,
dataflow_subnetwork: Optional[str] = '',
dataflow_use_public_ips: Optional[bool] = True,
dataflow_service_account: Optional[str] = '',
encryption_spec_key_name: Optional[str] = '',
run_distillation: Optional[bool] = False,
additional_experiments: Optional[str] = '',
additional_experiments_json: Optional[dict] = {},
data_source_csv_filenames: Optional[str] = '',
data_source_bigquery_table_path: Optional[str] = '',
predefined_split_key: Optional[str] = '',
timestamp_split_key: Optional[str] = '',
stratified_split_key: Optional[str] = '',
training_fraction: Optional[float] = -1,
validation_fraction: Optional[float] = -1,
test_fraction: Optional[float] = -1,
quantiles: Optional[list] = [],
enable_probabilistic_inference: Optional[bool] = False,
):
# fmt: off
"""Generates stats and training instances for tabular data.
Args:
project: Project to run dataset statistics and example generation.
location: Location for running dataset statistics and example generation.
root_dir: The Cloud Storage location to store the output.
target_column_name: The target column name.
weight_column_name: The weight column name.
prediction_type: The prediction type. Supported values: "classification", "regression".
optimization_objective: Objective function the model is optimizing towards. The training process creates a model that maximizes/minimizes the value of the objective function over the validation set. The supported optimization objectives depend on the prediction type. If the field is not set, a default objective function is used. classification: "maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC) curve. "minimize-log-loss" - Minimize log loss. "maximize-au-prc" - Maximize the area under the precision-recall curve. "maximize-precision-at-recall" - Maximize precision for a specified recall value. "maximize-recall-at-precision" - Maximize recall for a specified precision value. classification (multi-class): "minimize-log-loss" (default) - Minimize log loss. regression: "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute error (MAE). "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
optimization_objective_recall_value: Required when optimization_objective is "maximize-precision-at-recall". Must be between 0 and 1, inclusive.
optimization_objective_precision_value: Required when optimization_objective is "maximize-recall-at-precision". Must be between 0 and 1, inclusive.
    transformations: Quote escaped JSON string for transformations. Each transformation applies a transform function to the given input column, and the result is used for training. When creating a transformation for a BigQuery Struct column, the column should be flattened using "." as the delimiter.
transformations_path: Path to a GCS file containing JSON string for transformations.
    dataflow_machine_type: The machine type used for Dataflow jobs. If not set, defaults to n1-standard-16.
    dataflow_max_num_workers: The maximum number of workers to run the Dataflow job. If not set, defaults to 25.
    dataflow_disk_size_gb: The disk size, in gigabytes, to use on each Dataflow worker instance. If not set, defaults to 40.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
dataflow_service_account: Custom service account to run dataflow jobs.
encryption_spec_key_name: Customer-managed encryption key.
run_distillation: True if in distillation mode. The default value is false.
Returns:
dataset_schema: The schema of the dataset.
dataset_stats: The stats of the dataset.
train_split: The train split.
eval_split: The eval split.
test_split: The test split.
test_split_json: The test split JSON object.
downsampled_test_split_json: The downsampled test split JSON object.
instance_baseline: The instance baseline used to calculate explanations.
metadata: The tabular example gen metadata.
gcp_resources: GCP resources created by this component. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return dsl.ContainerSpec(
image='gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44',
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.custom_job.launcher',
],
args=[
'--type',
'CustomJob',
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--payload',
dsl.ConcatPlaceholder(
items=[
(
'{"display_name":'
f' "tabular-stats-and-example-gen-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
' "encryption_spec": {"kms_key_name":"'
),
encryption_spec_key_name,
(
'"}, "job_spec": {"worker_pool_specs": [{"replica_count":'
' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
' "container_spec": {"image_uri":"'
),
'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625',
'", "args": ["stats_generator",',
'"--train_spec={\\"prediction_type\\": \\"',
prediction_type,
'\\", \\"target_column\\": \\"',
target_column_name,
'\\", \\"optimization_objective\\": \\"',
optimization_objective,
'\\", \\"weight_column_name\\": \\"',
weight_column_name,
'\\", \\"transformations\\": ',
transformations,
', \\"quantiles\\": ',
quantiles,
', \\"enable_probabilistic_inference\\": ',
enable_probabilistic_inference,
'}", "--transformations_override_path=',
transformations_path,
'", "--data_source_csv_filenames=',
data_source_csv_filenames,
'", "--data_source_bigquery_table_path=',
data_source_bigquery_table_path,
'", "--predefined_split_key=',
predefined_split_key,
'", "--timestamp_split_key=',
timestamp_split_key,
'", "--stratified_split_key=',
stratified_split_key,
'", "--training_fraction=',
training_fraction,
'", "--validation_fraction=',
validation_fraction,
'", "--test_fraction=',
test_fraction,
'", "--target_column=',
target_column_name,
'", "--request_type=',
request_type,
'", "--optimization_objective_recall_value=',
optimization_objective_recall_value,
'", "--optimization_objective_precision_value=',
optimization_objective_precision_value,
'", "--example_gen_gcs_output_prefix=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/example_gen_output",'
' "--dataset_stats_dir='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/stats/",'
' "--stats_result_path='
),
dataset_stats.uri,
'", "--dataset_schema_path=',
dataset_schema.uri,
(
f'", "--job_name=tabular-stats-and-example-gen-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}'
),
'", "--dataflow_project=',
project,
'", "--error_file_path=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/error.pb",'
' "--dataflow_staging_dir='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/dataflow_staging",'
' "--dataflow_tmp_dir='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/dataflow_tmp",'
' "--dataflow_max_num_workers='
),
dataflow_max_num_workers,
'", "--dataflow_worker_container_image=',
'us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625',
'", "--dataflow_machine_type=',
dataflow_machine_type,
'", "--dataflow_disk_size_gb=',
dataflow_disk_size_gb,
'", "--dataflow_kms_key=',
encryption_spec_key_name,
'", "--dataflow_subnetwork_fully_qualified=',
dataflow_subnetwork,
'", "--dataflow_use_public_ips=',
dataflow_use_public_ips,
'", "--dataflow_service_account=',
dataflow_service_account,
'", "--is_distill=',
run_distillation,
'", "--additional_experiments=',
additional_experiments,
'", "--metadata_path=',
metadata.uri,
'", "--train_split=',
train_split.uri,
'", "--eval_split=',
eval_split.uri,
'", "--test_split=',
test_split.uri,
'", "--test_split_for_batch_prediction_component=',
test_split_json,
(
'", "--downsampled_test_split_for_batch_prediction_component='
),
downsampled_test_split_json,
'", "--instance_baseline_path=',
instance_baseline.uri,
'", "--lro_job_info=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/lro",'
' "--gcp_resources_path='
),
gcp_resources,
(
'", "--parse_json=true",'
' "--generate_additional_downsample_test_split=true",'
' "--executor_input={{$.json_escape[1]}}"]}}]}}'
),
]
),
],
)
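# Illustrative wiring sketch (the pipeline name, GCS paths, and column names are
# hypothetical placeholders; only the component's own parameters are real):
# inside a KFP pipeline, this container component is called like a normal task
# and its outputs are consumed by downstream steps.
#
#   from kfp import dsl
#
#   @dsl.pipeline(name='stats-and-example-gen-demo')
#   def demo_pipeline(project: str, location: str, root_dir: str):
#     stats_task = tabular_stats_and_example_gen(
#         project=project,
#         location=location,
#         root_dir=root_dir,
#         target_column_name='label',                                # hypothetical
#         prediction_type='classification',
#         transformations='{"auto": {"column_name": "feature_1"}}',  # hypothetical
#         data_source_csv_filenames='gs://my-bucket/train.csv',      # hypothetical
#     )
#     # Downstream components consume e.g. stats_task.outputs['dataset_schema'],
#     # stats_task.outputs['metadata'], and the train/eval/test split artifacts.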
| 818 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/ensemble.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Tabular Ensemble component spec."""
from typing import Optional
from google_cloud_pipeline_components.types.artifact_types import UnmanagedContainerModel
from kfp import dsl
from kfp.dsl import Artifact
from kfp.dsl import Dataset
from kfp.dsl import Input
from kfp.dsl import Output
@dsl.container_component
def automl_tabular_ensemble(
project: str,
location: str,
root_dir: str,
transform_output: Input[Artifact],
metadata: Input[Artifact],
dataset_schema: Input[Artifact],
tuning_result_input: Input[Artifact],
instance_baseline: Input[Artifact],
gcp_resources: dsl.OutputPath(str),
model_architecture: Output[Artifact],
model: Output[Artifact],
unmanaged_container_model: Output[UnmanagedContainerModel],
model_without_custom_ops: Output[Artifact],
explanation_metadata: dsl.OutputPath(dict),
explanation_metadata_artifact: Output[Artifact],
explanation_parameters: dsl.OutputPath(dict),
warmup_data: Optional[Input[Dataset]] = None,
encryption_spec_key_name: Optional[str] = '',
export_additional_model_without_custom_ops: Optional[bool] = False,
):
# fmt: off
"""Ensembles AutoML Tabular models.
Args:
    project: Project to run the ensemble job.
    location: Location for running the ensemble job.
root_dir: The Cloud Storage location to store the output.
transform_output: The transform output artifact.
metadata: The tabular example gen metadata.
dataset_schema: The schema of the dataset.
tuning_result_input: AutoML Tabular tuning result.
instance_baseline: The instance baseline used to calculate explanations.
    warmup_data: The warm-up data. The ensemble component saves the warm-up data together with the model artifact; it is used to warm up the model when the prediction server starts.
    encryption_spec_key_name: Customer-managed encryption key.
    export_additional_model_without_custom_ops: True to export an additional model without custom TF operators to the `model_without_custom_ops` output.
Returns:
gcp_resources: GCP resources created by this component. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
model_architecture: The architecture of the output model.
model: The output model.
model_without_custom_ops: The output model without custom TF operators, this output will be empty unless `export_additional_model_without_custom_ops` is set.
model_uri: The URI of the output model.
instance_schema_uri: The URI of the instance schema.
prediction_schema_uri: The URI of the prediction schema.
explanation_metadata: The explanation metadata used by Vertex online and batch explanations.
    explanation_parameters: The explanation parameters used by Vertex online and batch explanations.
"""
# fmt: on
return dsl.ContainerSpec(
image='gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44',
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.custom_job.launcher',
],
args=[
'--type',
'CustomJob',
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--payload',
dsl.ConcatPlaceholder(
items=[
(
'{"display_name":'
f' "automl-tabular-ensemble-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
' "encryption_spec": {"kms_key_name":"'
),
encryption_spec_key_name,
(
'"}, "job_spec": {"worker_pool_specs": [{"replica_count":'
' 1, "machine_spec": {"machine_type": "n1-highmem-8"},'
' "container_spec": {"image_uri":"'
),
'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625',
'", "args": ["ensemble", "--transform_output_path=',
transform_output.uri,
'", "--model_output_path=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/model",'
' "--custom_model_output_path='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/custom_model",'
' "--error_file_path='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/error.pb",'
' "--export_custom_model='
),
export_additional_model_without_custom_ops,
'", "--metadata_path=',
metadata.uri,
'", "--dataset_schema_path=',
dataset_schema.uri,
'", "--tuning_result_input_path=',
tuning_result_input.uri,
'", "--instance_baseline_path=',
instance_baseline.uri,
'", "--warmup_data=',
warmup_data.uri,
'", "--prediction_docker_uri=',
'us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:20240808_0625',
'", "--model_path=',
model.uri,
'", "--custom_model_path=',
model_without_custom_ops.uri,
'", "--explanation_metadata_path=',
explanation_metadata,
',',
explanation_metadata_artifact.uri,
'", "--explanation_parameters_path=',
explanation_parameters,
'", "--model_architecture_path=',
model_architecture.uri,
(
'", "--use_json=true",'
' "--executor_input={{$.json_escape[1]}}"]}}]}}'
),
]
),
],
)
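# Illustrative wiring sketch (task names are hypothetical; the parameters and
# artifact keys match the upstream component outputs used by the deprecated
# default pipeline in this package): the ensemble step consumes artifacts from
# the stats/example-gen, transform, and stage-1 tuner steps.
#
#   ensemble_task = automl_tabular_ensemble(
#       project=project,
#       location=location,
#       root_dir=root_dir,
#       transform_output=transform_task.outputs['transform_output'],
#       metadata=stats_task.outputs['metadata'],
#       dataset_schema=stats_task.outputs['dataset_schema'],
#       tuning_result_input=tuner_task.outputs['tuning_result_output'],
#       instance_baseline=stats_task.outputs['instance_baseline'],
#       warmup_data=stats_task.outputs['eval_split'],
#   )
#   # ensemble_task.outputs['unmanaged_container_model'] then feeds the infra
#   # validator and model upload steps.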
| 819 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/transform.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Transform component spec."""
from typing import Optional
from kfp import dsl
from kfp.dsl import Artifact
from kfp.dsl import Dataset
from kfp.dsl import Input
from kfp.dsl import Output
@dsl.container_component
def automl_tabular_transform(
project: str,
location: str,
root_dir: str,
metadata: Input[Artifact],
dataset_schema: Input[Artifact],
train_split: Input[Dataset],
eval_split: Input[Dataset],
test_split: Input[Dataset],
materialized_train_split: Output[Artifact],
materialized_eval_split: Output[Artifact],
materialized_test_split: Output[Artifact],
training_schema_uri: Output[Artifact],
transform_output: Output[Artifact],
gcp_resources: dsl.OutputPath(str),
dataflow_machine_type: Optional[str] = 'n1-standard-16',
dataflow_max_num_workers: Optional[int] = 25,
dataflow_disk_size_gb: Optional[int] = 40,
dataflow_subnetwork: Optional[str] = '',
dataflow_use_public_ips: Optional[bool] = True,
dataflow_service_account: Optional[str] = '',
encryption_spec_key_name: Optional[str] = '',
):
# fmt: off
"""Transforms raw features to engineered features.
Args:
    project: Project to run the transform job.
    location: Location for running the transform job.
root_dir: The Cloud Storage location to store the output.
metadata: The tabular example gen metadata.
dataset_schema: The schema of the dataset.
train_split: The train split.
eval_split: The eval split.
test_split: The test split.
    dataflow_machine_type: The machine type used for Dataflow jobs. If not set, defaults to n1-standard-16.
    dataflow_max_num_workers: The maximum number of workers to run the Dataflow job. If not set, defaults to 25.
    dataflow_disk_size_gb: The disk size, in gigabytes, to use on each Dataflow worker instance. If not set, defaults to 40.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
dataflow_service_account: Custom service account to run dataflow jobs.
encryption_spec_key_name: Customer-managed encryption key.
Returns:
materialized_train_split: The materialized train split.
materialized_eval_split: The materialized eval split.
    materialized_test_split: The materialized test split.
training_schema_uri: The training schema.
transform_output: The transform output artifact.
gcp_resources: GCP resources created by this component. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return dsl.ContainerSpec(
image='gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44',
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.custom_job.launcher',
],
args=[
'--type',
'CustomJob',
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--payload',
dsl.ConcatPlaceholder(
items=[
(
'{"display_name":'
f' "automl-tabular-transform-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}",'
' "encryption_spec": {"kms_key_name":"'
),
encryption_spec_key_name,
(
'"}, "job_spec": {"worker_pool_specs": [{"replica_count":'
' 1, "machine_spec": {"machine_type": "n1-standard-8"},'
' "container_spec": {"image_uri":"'
),
'us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625',
(
'", "args": ["transform", "--is_mp=true",'
' "--transform_output_artifact_path='
),
transform_output.uri,
'", "--transform_output_path=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/transform",'
' "--materialized_splits_output_path='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/transform_materialized",'
' "--metadata_path='
),
metadata.uri,
'", "--dataset_schema_path=',
dataset_schema.uri,
'", "--train_split=',
train_split.uri,
'", "--eval_split=',
eval_split.uri,
'", "--test_split=',
test_split.uri,
'", "--materialized_train_split=',
materialized_train_split.uri,
'", "--materialized_eval_split=',
materialized_eval_split.uri,
'", "--materialized_test_split=',
materialized_test_split.uri,
'", "--training_schema_path=',
training_schema_uri.uri,
(
f'", "--job_name=automl-tabular-transform-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}'
),
'", "--dataflow_project=',
project,
'", "--error_file_path=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/error.pb",'
' "--dataflow_staging_dir='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/dataflow_staging",'
' "--dataflow_tmp_dir='
),
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/dataflow_tmp",'
' "--dataflow_max_num_workers='
),
dataflow_max_num_workers,
'", "--dataflow_machine_type=',
dataflow_machine_type,
'", "--dataflow_worker_container_image=',
'us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625',
'", "--dataflow_disk_size_gb=',
dataflow_disk_size_gb,
'", "--dataflow_subnetwork_fully_qualified=',
dataflow_subnetwork,
'", "--dataflow_use_public_ips=',
dataflow_use_public_ips,
'", "--dataflow_kms_key=',
encryption_spec_key_name,
'", "--dataflow_service_account=',
dataflow_service_account,
'", "--lro_job_info=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/lro",'
' "--gcp_resources_path='
),
gcp_resources,
'"]}}]}}',
]
),
],
)
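# Illustrative wiring sketch (task names are hypothetical; the parameters and
# artifact keys come from the component signatures in this package): the
# transform step consumes the raw splits and schema emitted by
# tabular_stats_and_example_gen.
#
#   transform_task = automl_tabular_transform(
#       project=project,
#       location=location,
#       root_dir=root_dir,
#       metadata=stats_task.outputs['metadata'],
#       dataset_schema=stats_task.outputs['dataset_schema'],
#       train_split=stats_task.outputs['train_split'],
#       eval_split=stats_task.outputs['eval_split'],
#       test_split=stats_task.outputs['test_split'],
#   )
#   # The materialized splits feed the stage-1 tuner, and
#   # transform_task.outputs['transform_output'] feeds the ensemble step.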
| 820 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/tabular/deprecated/default_pipeline.json | {
"pipelineSpec": {
"components": {
"comp-automl-tabular-cv-trainer": {
"executorLabel": "exec-automl-tabular-cv-trainer",
"inputDefinitions": {
"artifacts": {
"materialized_cv_splits": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"transform_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"tuning_result_input": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"deadline_hours": {
"type": "DOUBLE"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"num_parallel_trials": {
"type": "INT"
},
"num_selected_trials": {
"type": "INT"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
},
"single_run_max_secs": {
"type": "INT"
},
"worker_pool_specs_override": {
"type": "STRING"
},
"worker_pool_specs_override_json": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"tuning_result_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-automl-tabular-ensemble": {
"executorLabel": "exec-automl-tabular-ensemble",
"inputDefinitions": {
"artifacts": {
"dataset_schema": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"instance_baseline": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"transform_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"tuning_result_input": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"warmup_data": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"encryption_spec_key_name": {
"type": "STRING"
},
"export_additional_model_without_custom_ops": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"model": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"model_architecture": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"model_without_custom_ops": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-automl-tabular-ensemble-2": {
"executorLabel": "exec-automl-tabular-ensemble-2",
"inputDefinitions": {
"artifacts": {
"dataset_schema": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"instance_baseline": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"transform_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"tuning_result_input": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"warmup_data": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"encryption_spec_key_name": {
"type": "STRING"
},
"export_additional_model_without_custom_ops": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"model": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"model_architecture": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"model_without_custom_ops": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-automl-tabular-finalizer": {
"executorLabel": "exec-automl-tabular-finalizer",
"inputDefinitions": {
"parameters": {
"encryption_spec_key_name": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-automl-tabular-infra-validator": {
"executorLabel": "exec-automl-tabular-infra-validator",
"inputDefinitions": {
"artifacts": {
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-automl-tabular-infra-validator-2": {
"executorLabel": "exec-automl-tabular-infra-validator-2",
"inputDefinitions": {
"artifacts": {
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-automl-tabular-stage-1-tuner": {
"executorLabel": "exec-automl-tabular-stage-1-tuner",
"inputDefinitions": {
"artifacts": {
"materialized_eval_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"materialized_train_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"transform_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"deadline_hours": {
"type": "DOUBLE"
},
"disable_early_stopping": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"num_parallel_trials": {
"type": "INT"
},
"num_selected_trials": {
"type": "INT"
},
"project": {
"type": "STRING"
},
"reduce_search_space_mode": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
},
"run_distillation": {
"type": "STRING"
},
"single_run_max_secs": {
"type": "INT"
},
"study_spec_override": {
"type": "STRING"
},
"study_spec_parameters_override": {
"type": "STRING"
},
"study_spec_parameters_override_json": {
"type": "STRING"
},
"tune_feature_selection_rate": {
"type": "STRING"
},
"worker_pool_specs_override": {
"type": "STRING"
},
"worker_pool_specs_override_json": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"tuning_result_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-automl-tabular-stage-1-tuner-2": {
"executorLabel": "exec-automl-tabular-stage-1-tuner-2",
"inputDefinitions": {
"artifacts": {
"materialized_eval_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"materialized_train_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"transform_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"deadline_hours": {
"type": "DOUBLE"
},
"disable_early_stopping": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"num_parallel_trials": {
"type": "INT"
},
"num_selected_trials": {
"type": "INT"
},
"project": {
"type": "STRING"
},
"reduce_search_space_mode": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
},
"run_distillation": {
"type": "STRING"
},
"single_run_max_secs": {
"type": "INT"
},
"study_spec_override": {
"type": "STRING"
},
"study_spec_parameters_override": {
"type": "STRING"
},
"study_spec_parameters_override_json": {
"type": "STRING"
},
"tune_feature_selection_rate": {
"type": "STRING"
},
"worker_pool_specs_override": {
"type": "STRING"
},
"worker_pool_specs_override_json": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"tuning_result_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-automl-tabular-transform": {
"executorLabel": "exec-automl-tabular-transform",
"inputDefinitions": {
"artifacts": {
"dataset_schema": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"eval_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
},
"metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"test_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
},
"train_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"dataflow_disk_size_gb": {
"type": "INT"
},
"dataflow_machine_type": {
"type": "STRING"
},
"dataflow_max_num_workers": {
"type": "INT"
},
"dataflow_service_account": {
"type": "STRING"
},
"dataflow_subnetwork": {
"type": "STRING"
},
"dataflow_use_public_ips": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"materialized_eval_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"materialized_test_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"materialized_train_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"training_schema_uri": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"transform_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-automl-tabular-transform-2": {
"executorLabel": "exec-automl-tabular-transform-2",
"inputDefinitions": {
"artifacts": {
"dataset_schema": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"eval_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
},
"metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"test_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
},
"train_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"dataflow_disk_size_gb": {
"type": "INT"
},
"dataflow_machine_type": {
"type": "STRING"
},
"dataflow_max_num_workers": {
"type": "INT"
},
"dataflow_service_account": {
"type": "STRING"
},
"dataflow_subnetwork": {
"type": "STRING"
},
"dataflow_use_public_ips": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"materialized_eval_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"materialized_test_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"materialized_train_split": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"training_schema_uri": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"transform_output": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-bool-identity": {
"executorLabel": "exec-bool-identity",
"inputDefinitions": {
"parameters": {
"value": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"parameters": {
"Output": {
"type": "STRING"
}
}
}
},
"comp-bool-identity-2": {
"executorLabel": "exec-bool-identity-2",
"inputDefinitions": {
"parameters": {
"value": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"parameters": {
"Output": {
"type": "STRING"
}
}
}
},
"comp-condition-is-distill-4": {
"dag": {
"outputs": {
"artifacts": {
"model-evaluation-3-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-3-evaluation_metrics",
"producerSubtask": "condition-is-evaluation-5"
}
]
},
"model-evaluation-4-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-4-evaluation_metrics",
"producerSubtask": "condition-is-evaluation-5"
}
]
}
}
},
"tasks": {
"automl-tabular-ensemble-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-ensemble-2"
},
"dependentTasks": [
"automl-tabular-stage-1-tuner-2",
"automl-tabular-transform-2"
],
"inputs": {
"artifacts": {
"dataset_schema": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-dataset_schema"
},
"instance_baseline": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-instance_baseline"
},
"metadata": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-metadata"
},
"transform_output": {
"taskOutputArtifact": {
"outputArtifactKey": "transform_output",
"producerTask": "automl-tabular-transform-2"
}
},
"tuning_result_input": {
"taskOutputArtifact": {
"outputArtifactKey": "tuning_result_output",
"producerTask": "automl-tabular-stage-1-tuner-2"
}
},
"warmup_data": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-eval_split"
}
},
"parameters": {
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"export_additional_model_without_custom_ops": {
"componentInputParameter": "pipelineparam--export_additional_model_without_custom_ops"
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
}
}
},
"taskInfo": {
"name": "automl-tabular-ensemble-2"
}
},
"automl-tabular-infra-validator-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-infra-validator-2"
},
"dependentTasks": [
"automl-tabular-ensemble-2"
],
"inputs": {
"artifacts": {
"unmanaged_container_model": {
"taskOutputArtifact": {
"outputArtifactKey": "unmanaged_container_model",
"producerTask": "automl-tabular-ensemble-2"
}
}
}
},
"taskInfo": {
"name": "automl-tabular-infra-validator-2"
}
},
"automl-tabular-stage-1-tuner-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-stage-1-tuner-2"
},
"dependentTasks": [
"automl-tabular-transform-2"
],
"inputs": {
"artifacts": {
"materialized_eval_split": {
"taskOutputArtifact": {
"outputArtifactKey": "materialized_eval_split",
"producerTask": "automl-tabular-transform-2"
}
},
"materialized_train_split": {
"taskOutputArtifact": {
"outputArtifactKey": "materialized_train_split",
"producerTask": "automl-tabular-transform-2"
}
},
"metadata": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-metadata"
},
"transform_output": {
"taskOutputArtifact": {
"outputArtifactKey": "transform_output",
"producerTask": "automl-tabular-transform-2"
}
}
},
"parameters": {
"deadline_hours": {
"componentInputParameter": "pipelineparam--distill_stage_1_deadline_hours"
},
"disable_early_stopping": {
"componentInputParameter": "pipelineparam--disable_early_stopping"
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"num_parallel_trials": {
"componentInputParameter": "pipelineparam--stage_1_num_parallel_trials"
},
"num_selected_trials": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"reduce_search_space_mode": {
"componentInputParameter": "pipelineparam--reduce_search_space_mode"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
},
"run_distillation": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"single_run_max_secs": {
"componentInputParameter": "pipelineparam--stage_1_single_run_max_secs"
},
"study_spec_override": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"study_spec_parameters_override": {
"runtimeValue": {
"constantValue": {
"stringValue": "[]"
}
}
},
"study_spec_parameters_override_json": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"tune_feature_selection_rate": {
"runtimeValue": {
"constantValue": {
"stringValue": "false"
}
}
},
"worker_pool_specs_override": {
"componentInputParameter": "pipelineparam--stage_1_tuner_worker_pool_specs_override"
},
"worker_pool_specs_override_json": {
"runtimeValue": {
"constantValue": {
"stringValue": "[]"
}
}
}
}
},
"taskInfo": {
"name": "automl-tabular-stage-1-tuner-2"
}
},
"automl-tabular-transform-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-transform-2"
},
"dependentTasks": [
"write-bp-result-path",
"write-bp-result-path-2"
],
"inputs": {
"artifacts": {
"dataset_schema": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-dataset_schema"
},
"eval_split": {
"taskOutputArtifact": {
"outputArtifactKey": "result",
"producerTask": "write-bp-result-path-2"
}
},
"metadata": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-metadata"
},
"test_split": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-test_split"
},
"train_split": {
"taskOutputArtifact": {
"outputArtifactKey": "result",
"producerTask": "write-bp-result-path"
}
}
},
"parameters": {
"dataflow_disk_size_gb": {
"componentInputParameter": "pipelineparam--transform_dataflow_disk_size_gb"
},
"dataflow_machine_type": {
"componentInputParameter": "pipelineparam--transform_dataflow_machine_type"
},
"dataflow_max_num_workers": {
"componentInputParameter": "pipelineparam--transform_dataflow_max_num_workers"
},
"dataflow_service_account": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_subnetwork": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_use_public_ips": {
"runtimeValue": {
"constantValue": {
"stringValue": "true"
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
}
}
},
"taskInfo": {
"name": "automl-tabular-transform-2"
}
},
"condition-is-evaluation-5": {
"componentRef": {
"name": "comp-condition-is-evaluation-5"
},
"dependentTasks": [
"automl-tabular-ensemble-2",
"model-upload-3"
],
"inputs": {
"artifacts": {
"pipelineparam--automl-tabular-ensemble-2-explanation_metadata_artifact": {
"taskOutputArtifact": {
"outputArtifactKey": "explanation_metadata_artifact",
"producerTask": "automl-tabular-ensemble-2"
}
},
"pipelineparam--automl-tabular-ensemble-2-unmanaged_container_model": {
"taskOutputArtifact": {
"outputArtifactKey": "unmanaged_container_model",
"producerTask": "automl-tabular-ensemble-2"
}
},
"pipelineparam--model-upload-3-model": {
"taskOutputArtifact": {
"outputArtifactKey": "model",
"producerTask": "model-upload-3"
}
}
},
"parameters": {
"pipelineparam--automl-tabular-ensemble-2-explanation_parameters": {
"taskOutputParameter": {
"outputParameterKey": "explanation_parameters",
"producerTask": "automl-tabular-ensemble-2"
}
},
"pipelineparam--bool-identity-2-Output": {
"componentInputParameter": "pipelineparam--bool-identity-2-Output"
},
"pipelineparam--bool-identity-Output": {
"componentInputParameter": "pipelineparam--bool-identity-Output"
},
"pipelineparam--dataflow_service_account": {
"componentInputParameter": "pipelineparam--dataflow_service_account"
},
"pipelineparam--dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"pipelineparam--dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"pipelineparam--encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_machine_type"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_max_replica_count"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_starting_replica_count"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_disk_size_gb"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_machine_type"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_max_num_workers"
},
"pipelineparam--location": {
"componentInputParameter": "pipelineparam--location"
},
"pipelineparam--prediction_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"pipelineparam--project": {
"componentInputParameter": "pipelineparam--project"
},
"pipelineparam--root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
},
"pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json": {
"componentInputParameter": "pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json"
},
"pipelineparam--tabular-stats-and-example-gen-test_split_json": {
"componentInputParameter": "pipelineparam--tabular-stats-and-example-gen-test_split_json"
},
"pipelineparam--target_column_name": {
"componentInputParameter": "pipelineparam--target_column_name"
}
}
},
"taskInfo": {
"name": "condition-is-evaluation-5"
},
"triggerPolicy": {
"condition": "inputs.parameters['pipelineparam--bool-identity-Output'].string_value == 'true'"
}
},
"model-batch-predict-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-batch-predict-2"
},
"dependentTasks": [
"model-upload-2",
"read-input-uri"
],
"inputs": {
"artifacts": {
"model": {
"taskOutputArtifact": {
"outputArtifactKey": "model",
"producerTask": "model-upload-2"
}
}
},
"parameters": {
"accelerator_count": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"accelerator_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_destination_output_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_source_input_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"gcs_destination_output_uri_prefix": {
"componentInputParameter": "pipelineparam--root_dir"
},
"gcs_source_uris": {
"taskOutputParameter": {
"outputParameterKey": "Output",
"producerTask": "read-input-uri"
}
},
"generate_explanation": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"instances_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "tf-record"
}
}
},
"job_display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "batch-predict-train-split"
}
}
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"machine_type": {
"componentInputParameter": "pipelineparam--distill_batch_predict_machine_type"
},
"manual_batch_tuning_parameters_batch_size": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"max_replica_count": {
"componentInputParameter": "pipelineparam--distill_batch_predict_max_replica_count"
},
"model_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "tf-record"
}
}
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"starting_replica_count": {
"componentInputParameter": "pipelineparam--distill_batch_predict_starting_replica_count"
}
}
},
"taskInfo": {
"name": "model-batch-predict-2"
}
},
"model-batch-predict-3": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-batch-predict-3"
},
"dependentTasks": [
"model-upload-2",
"read-input-uri-2"
],
"inputs": {
"artifacts": {
"model": {
"taskOutputArtifact": {
"outputArtifactKey": "model",
"producerTask": "model-upload-2"
}
}
},
"parameters": {
"accelerator_count": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"accelerator_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_destination_output_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_source_input_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"gcs_destination_output_uri_prefix": {
"componentInputParameter": "pipelineparam--root_dir"
},
"gcs_source_uris": {
"taskOutputParameter": {
"outputParameterKey": "Output",
"producerTask": "read-input-uri-2"
}
},
"generate_explanation": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"instances_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "tf-record"
}
}
},
"job_display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "batch-predict-eval-split"
}
}
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"machine_type": {
"componentInputParameter": "pipelineparam--distill_batch_predict_machine_type"
},
"manual_batch_tuning_parameters_batch_size": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"max_replica_count": {
"componentInputParameter": "pipelineparam--distill_batch_predict_max_replica_count"
},
"model_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "tf-record"
}
}
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"starting_replica_count": {
"componentInputParameter": "pipelineparam--distill_batch_predict_starting_replica_count"
}
}
},
"taskInfo": {
"name": "model-batch-predict-3"
}
},
"model-upload-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-upload-2"
},
"dependentTasks": [
"set-model-can-skip-validation"
],
"inputs": {
"artifacts": {
"explanation_metadata_artifact": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact"
},
"unmanaged_container_model": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-unmanaged_container_model"
}
},
"parameters": {
"description": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "automl-tabular-model-upload-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}"
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"componentInputParameter": "pipelineparam--automl-tabular-ensemble-explanation_parameters"
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"project": {
"componentInputParameter": "pipelineparam--project"
}
}
},
"taskInfo": {
"name": "model-upload-2"
}
},
"model-upload-3": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-upload-3"
},
"dependentTasks": [
"automl-tabular-ensemble-2",
"automl-tabular-infra-validator-2"
],
"inputs": {
"artifacts": {
"explanation_metadata_artifact": {
"taskOutputArtifact": {
"outputArtifactKey": "explanation_metadata_artifact",
"producerTask": "automl-tabular-ensemble-2"
}
},
"unmanaged_container_model": {
"taskOutputArtifact": {
"outputArtifactKey": "unmanaged_container_model",
"producerTask": "automl-tabular-ensemble-2"
}
}
},
"parameters": {
"description": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "automl-tabular-distill-model-upload-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}"
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"taskOutputParameter": {
"outputParameterKey": "explanation_parameters",
"producerTask": "automl-tabular-ensemble-2"
}
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"project": {
"componentInputParameter": "pipelineparam--project"
}
}
},
"taskInfo": {
"name": "model-upload-3"
}
},
"read-input-uri": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-read-input-uri"
},
"inputs": {
"artifacts": {
"split_uri": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-train_split"
}
}
},
"taskInfo": {
"name": "read-input-uri"
}
},
"read-input-uri-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-read-input-uri-2"
},
"inputs": {
"artifacts": {
"split_uri": {
"componentInputArtifact": "pipelineparam--tabular-stats-and-example-gen-eval_split"
}
}
},
"taskInfo": {
"name": "read-input-uri-2"
}
},
"set-model-can-skip-validation": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-set-model-can-skip-validation"
},
"inputs": {
"artifacts": {
"model": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-unmanaged_container_model"
}
}
},
"taskInfo": {
"name": "set-model-can-skip-validation"
}
},
"write-bp-result-path": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-write-bp-result-path"
},
"dependentTasks": [
"model-batch-predict-2"
],
"inputs": {
"artifacts": {
"bp_job": {
"taskOutputArtifact": {
"outputArtifactKey": "batchpredictionjob",
"producerTask": "model-batch-predict-2"
}
}
}
},
"taskInfo": {
"name": "write-bp-result-path"
}
},
"write-bp-result-path-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-write-bp-result-path-2"
},
"dependentTasks": [
"model-batch-predict-3"
],
"inputs": {
"artifacts": {
"bp_job": {
"taskOutputArtifact": {
"outputArtifactKey": "batchpredictionjob",
"producerTask": "model-batch-predict-3"
}
}
}
},
"taskInfo": {
"name": "write-bp-result-path-2"
}
}
}
},
"inputDefinitions": {
"artifacts": {
"pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--automl-tabular-ensemble-unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--tabular-stats-and-example-gen-dataset_schema": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--tabular-stats-and-example-gen-eval_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--tabular-stats-and-example-gen-instance_baseline": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--tabular-stats-and-example-gen-metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--tabular-stats-and-example-gen-test_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--tabular-stats-and-example-gen-train_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"pipelineparam--automl-tabular-ensemble-explanation_parameters": {
"type": "STRING"
},
"pipelineparam--bool-identity-2-Output": {
"type": "STRING"
},
"pipelineparam--bool-identity-Output": {
"type": "STRING"
},
"pipelineparam--dataflow_service_account": {
"type": "STRING"
},
"pipelineparam--dataflow_subnetwork": {
"type": "STRING"
},
"pipelineparam--dataflow_use_public_ips": {
"type": "STRING"
},
"pipelineparam--disable_early_stopping": {
"type": "STRING"
},
"pipelineparam--distill_batch_predict_machine_type": {
"type": "STRING"
},
"pipelineparam--distill_batch_predict_max_replica_count": {
"type": "INT"
},
"pipelineparam--distill_batch_predict_starting_replica_count": {
"type": "INT"
},
"pipelineparam--distill_stage_1_deadline_hours": {
"type": "DOUBLE"
},
"pipelineparam--encryption_spec_key_name": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"type": "INT"
},
"pipelineparam--export_additional_model_without_custom_ops": {
"type": "STRING"
},
"pipelineparam--location": {
"type": "STRING"
},
"pipelineparam--prediction_type": {
"type": "STRING"
},
"pipelineparam--project": {
"type": "STRING"
},
"pipelineparam--reduce_search_space_mode": {
"type": "STRING"
},
"pipelineparam--root_dir": {
"type": "STRING"
},
"pipelineparam--stage_1_num_parallel_trials": {
"type": "INT"
},
"pipelineparam--stage_1_single_run_max_secs": {
"type": "INT"
},
"pipelineparam--stage_1_tuner_worker_pool_specs_override": {
"type": "STRING"
},
"pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json": {
"type": "STRING"
},
"pipelineparam--tabular-stats-and-example-gen-test_split_json": {
"type": "STRING"
},
"pipelineparam--target_column_name": {
"type": "STRING"
},
"pipelineparam--transform_dataflow_disk_size_gb": {
"type": "INT"
},
"pipelineparam--transform_dataflow_machine_type": {
"type": "STRING"
},
"pipelineparam--transform_dataflow_max_num_workers": {
"type": "INT"
}
}
},
"outputDefinitions": {
"artifacts": {
"model-evaluation-3-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-4-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-condition-is-evaluation-3": {
"dag": {
"outputs": {
"artifacts": {
"model-evaluation-2-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "evaluation_metrics",
"producerSubtask": "model-evaluation-2"
}
]
},
"model-evaluation-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "evaluation_metrics",
"producerSubtask": "model-evaluation"
}
]
}
}
},
"tasks": {
"model-batch-explanation": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-batch-explanation"
},
"inputs": {
"artifacts": {
"explanation_metadata_artifact": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact"
},
"unmanaged_container_model": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-unmanaged_container_model"
}
},
"parameters": {
"accelerator_count": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"accelerator_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_destination_output_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_source_input_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"componentInputParameter": "pipelineparam--automl-tabular-ensemble-explanation_parameters"
},
"gcs_destination_output_uri_prefix": {
"componentInputParameter": "pipelineparam--root_dir"
},
"gcs_source_uris": {
"componentInputParameter": "pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json"
},
"generate_explanation": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"instances_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "tf-record"
}
}
},
"job_display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "batch-explain-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}"
}
}
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"machine_type": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_machine_type"
},
"manual_batch_tuning_parameters_batch_size": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"max_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_max_replica_count"
},
"model_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"starting_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_starting_replica_count"
}
}
},
"taskInfo": {
"name": "model-batch-explanation"
}
},
"model-batch-predict": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-batch-predict"
},
"inputs": {
"artifacts": {
"unmanaged_container_model": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-unmanaged_container_model"
}
},
"parameters": {
"accelerator_count": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"accelerator_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_destination_output_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_source_input_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"gcs_destination_output_uri_prefix": {
"componentInputParameter": "pipelineparam--root_dir"
},
"gcs_source_uris": {
"componentInputParameter": "pipelineparam--tabular-stats-and-example-gen-test_split_json"
},
"generate_explanation": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"instances_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "tf-record"
}
}
},
"job_display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}"
}
}
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"machine_type": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_machine_type"
},
"manual_batch_tuning_parameters_batch_size": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"max_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_max_replica_count"
},
"model_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"starting_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_starting_replica_count"
}
}
},
"taskInfo": {
"name": "model-batch-predict"
}
},
"model-evaluation": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-evaluation"
},
"dependentTasks": [
"model-batch-predict"
],
"inputs": {
"artifacts": {
"batch_prediction_job": {
"taskOutputArtifact": {
"outputArtifactKey": "batchpredictionjob",
"producerTask": "model-batch-predict"
}
}
},
"parameters": {
"class_names": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"classification_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_disk_size": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_disk_size_gb"
},
"dataflow_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_machine_type"
},
"dataflow_max_workers_num": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_max_num_workers"
},
"dataflow_service_account": {
"componentInputParameter": "pipelineparam--dataflow_service_account"
},
"dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"dataflow_workers_num": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"example_weight_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"generate_feature_attribution": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"ground_truth_column": {
"componentInputParameter": "pipelineparam--target_column_name"
},
"ground_truth_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"ground_truth_gcs_source": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"key_columns": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"positive_classes": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"prediction_id_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_label_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_score_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"problem_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
}
}
},
"taskInfo": {
"name": "model-evaluation"
}
},
"model-evaluation-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-evaluation-2"
},
"dependentTasks": [
"model-batch-explanation"
],
"inputs": {
"artifacts": {
"batch_prediction_job": {
"taskOutputArtifact": {
"outputArtifactKey": "batchpredictionjob",
"producerTask": "model-batch-explanation"
}
}
},
"parameters": {
"class_names": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"classification_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_disk_size": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_disk_size_gb"
},
"dataflow_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_machine_type"
},
"dataflow_max_workers_num": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_max_num_workers"
},
"dataflow_service_account": {
"componentInputParameter": "pipelineparam--dataflow_service_account"
},
"dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"dataflow_workers_num": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"example_weight_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"generate_feature_attribution": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"ground_truth_column": {
"componentInputParameter": "pipelineparam--target_column_name"
},
"ground_truth_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"ground_truth_gcs_source": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"key_columns": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"positive_classes": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"prediction_id_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_label_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_score_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"problem_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
}
}
},
"taskInfo": {
"name": "model-evaluation-2"
}
},
"model-evaluation-import": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-evaluation-import"
},
"dependentTasks": [
"model-evaluation",
"model-evaluation-2"
],
"inputs": {
"artifacts": {
"explanation": {
"taskOutputArtifact": {
"outputArtifactKey": "evaluation_metrics",
"producerTask": "model-evaluation-2"
}
},
"metrics": {
"taskOutputArtifact": {
"outputArtifactKey": "evaluation_metrics",
"producerTask": "model-evaluation"
}
},
"model": {
"componentInputArtifact": "pipelineparam--model-upload-model"
}
},
"parameters": {
"dataset_path": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataset_paths": {
"runtimeValue": {
"constantValue": {
"stringValue": "[]"
}
}
},
"dataset_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"problem_type": {
"componentInputParameter": "pipelineparam--prediction_type"
}
}
},
"taskInfo": {
"name": "model-evaluation-import"
}
}
}
},
"inputDefinitions": {
"artifacts": {
"pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--automl-tabular-ensemble-unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--model-upload-model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"pipelineparam--automl-tabular-ensemble-explanation_parameters": {
"type": "STRING"
},
"pipelineparam--bool-identity-2-Output": {
"type": "STRING"
},
"pipelineparam--bool-identity-Output": {
"type": "STRING"
},
"pipelineparam--dataflow_service_account": {
"type": "STRING"
},
"pipelineparam--dataflow_subnetwork": {
"type": "STRING"
},
"pipelineparam--dataflow_use_public_ips": {
"type": "STRING"
},
"pipelineparam--encryption_spec_key_name": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"type": "INT"
},
"pipelineparam--location": {
"type": "STRING"
},
"pipelineparam--prediction_type": {
"type": "STRING"
},
"pipelineparam--project": {
"type": "STRING"
},
"pipelineparam--root_dir": {
"type": "STRING"
},
"pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json": {
"type": "STRING"
},
"pipelineparam--tabular-stats-and-example-gen-test_split_json": {
"type": "STRING"
},
"pipelineparam--target_column_name": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"model-evaluation-2-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-condition-is-evaluation-5": {
"dag": {
"outputs": {
"artifacts": {
"model-evaluation-3-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "evaluation_metrics",
"producerSubtask": "model-evaluation-3"
}
]
},
"model-evaluation-4-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "evaluation_metrics",
"producerSubtask": "model-evaluation-4"
}
]
}
}
},
"tasks": {
"model-batch-explanation-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-batch-explanation-2"
},
"inputs": {
"artifacts": {
"explanation_metadata_artifact": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-2-explanation_metadata_artifact"
},
"unmanaged_container_model": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-2-unmanaged_container_model"
}
},
"parameters": {
"accelerator_count": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"accelerator_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_destination_output_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_source_input_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"componentInputParameter": "pipelineparam--automl-tabular-ensemble-2-explanation_parameters"
},
"gcs_destination_output_uri_prefix": {
"componentInputParameter": "pipelineparam--root_dir"
},
"gcs_source_uris": {
"componentInputParameter": "pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json"
},
"generate_explanation": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"instances_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "tf-record"
}
}
},
"job_display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "batch-explain-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}"
}
}
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"machine_type": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_machine_type"
},
"manual_batch_tuning_parameters_batch_size": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"max_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_max_replica_count"
},
"model_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"starting_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_starting_replica_count"
}
}
},
"taskInfo": {
"name": "model-batch-explanation-2"
}
},
"model-batch-predict-4": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-batch-predict-4"
},
"inputs": {
"artifacts": {
"unmanaged_container_model": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-2-unmanaged_container_model"
}
},
"parameters": {
"accelerator_count": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"accelerator_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_destination_output_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"bigquery_source_input_uri": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"gcs_destination_output_uri_prefix": {
"componentInputParameter": "pipelineparam--root_dir"
},
"gcs_source_uris": {
"componentInputParameter": "pipelineparam--tabular-stats-and-example-gen-test_split_json"
},
"generate_explanation": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"instances_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "tf-record"
}
}
},
"job_display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}"
}
}
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"machine_type": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_machine_type"
},
"manual_batch_tuning_parameters_batch_size": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"max_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_max_replica_count"
},
"model_parameters": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"starting_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_starting_replica_count"
}
}
},
"taskInfo": {
"name": "model-batch-predict-4"
}
},
"model-evaluation-3": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-evaluation-3"
},
"dependentTasks": [
"model-batch-predict-4"
],
"inputs": {
"artifacts": {
"batch_prediction_job": {
"taskOutputArtifact": {
"outputArtifactKey": "batchpredictionjob",
"producerTask": "model-batch-predict-4"
}
}
},
"parameters": {
"class_names": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"classification_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_disk_size": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_disk_size_gb"
},
"dataflow_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_machine_type"
},
"dataflow_max_workers_num": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_max_num_workers"
},
"dataflow_service_account": {
"componentInputParameter": "pipelineparam--dataflow_service_account"
},
"dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"dataflow_workers_num": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"example_weight_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"generate_feature_attribution": {
"runtimeValue": {
"constantValue": {
"intValue": "0"
}
}
},
"ground_truth_column": {
"componentInputParameter": "pipelineparam--target_column_name"
},
"ground_truth_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"ground_truth_gcs_source": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"key_columns": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"positive_classes": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"prediction_id_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_label_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_score_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"problem_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
}
}
},
"taskInfo": {
"name": "model-evaluation-3"
}
},
"model-evaluation-4": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-evaluation-4"
},
"dependentTasks": [
"model-batch-explanation-2"
],
"inputs": {
"artifacts": {
"batch_prediction_job": {
"taskOutputArtifact": {
"outputArtifactKey": "batchpredictionjob",
"producerTask": "model-batch-explanation-2"
}
}
},
"parameters": {
"class_names": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"classification_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_disk_size": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_disk_size_gb"
},
"dataflow_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_machine_type"
},
"dataflow_max_workers_num": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_max_num_workers"
},
"dataflow_service_account": {
"componentInputParameter": "pipelineparam--dataflow_service_account"
},
"dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"dataflow_workers_num": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"example_weight_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"generate_feature_attribution": {
"runtimeValue": {
"constantValue": {
"intValue": "1"
}
}
},
"ground_truth_column": {
"componentInputParameter": "pipelineparam--target_column_name"
},
"ground_truth_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"ground_truth_gcs_source": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"key_columns": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"positive_classes": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"prediction_id_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_label_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_score_column": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"predictions_format": {
"runtimeValue": {
"constantValue": {
"stringValue": "jsonl"
}
}
},
"problem_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
}
}
},
"taskInfo": {
"name": "model-evaluation-4"
}
},
"model-evaluation-import-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-evaluation-import-2"
},
"dependentTasks": [
"model-evaluation-3",
"model-evaluation-4"
],
"inputs": {
"artifacts": {
"explanation": {
"taskOutputArtifact": {
"outputArtifactKey": "evaluation_metrics",
"producerTask": "model-evaluation-4"
}
},
"metrics": {
"taskOutputArtifact": {
"outputArtifactKey": "evaluation_metrics",
"producerTask": "model-evaluation-3"
}
},
"model": {
"componentInputArtifact": "pipelineparam--model-upload-3-model"
}
},
"parameters": {
"dataset_path": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataset_paths": {
"runtimeValue": {
"constantValue": {
"stringValue": "[]"
}
}
},
"dataset_type": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"problem_type": {
"componentInputParameter": "pipelineparam--prediction_type"
}
}
},
"taskInfo": {
"name": "model-evaluation-import-2"
}
}
}
},
"inputDefinitions": {
"artifacts": {
"pipelineparam--automl-tabular-ensemble-2-explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--automl-tabular-ensemble-2-unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--model-upload-3-model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"pipelineparam--automl-tabular-ensemble-2-explanation_parameters": {
"type": "STRING"
},
"pipelineparam--bool-identity-2-Output": {
"type": "STRING"
},
"pipelineparam--bool-identity-Output": {
"type": "STRING"
},
"pipelineparam--dataflow_service_account": {
"type": "STRING"
},
"pipelineparam--dataflow_subnetwork": {
"type": "STRING"
},
"pipelineparam--dataflow_use_public_ips": {
"type": "STRING"
},
"pipelineparam--encryption_spec_key_name": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"type": "INT"
},
"pipelineparam--location": {
"type": "STRING"
},
"pipelineparam--prediction_type": {
"type": "STRING"
},
"pipelineparam--project": {
"type": "STRING"
},
"pipelineparam--root_dir": {
"type": "STRING"
},
"pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json": {
"type": "STRING"
},
"pipelineparam--tabular-stats-and-example-gen-test_split_json": {
"type": "STRING"
},
"pipelineparam--target_column_name": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"model-evaluation-3-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-4-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-condition-no-distill-2": {
"dag": {
"outputs": {
"artifacts": {
"model-evaluation-2-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-2-evaluation_metrics",
"producerSubtask": "condition-is-evaluation-3"
}
]
},
"model-evaluation-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-evaluation_metrics",
"producerSubtask": "condition-is-evaluation-3"
}
]
}
}
},
"tasks": {
"condition-is-evaluation-3": {
"componentRef": {
"name": "comp-condition-is-evaluation-3"
},
"dependentTasks": [
"model-upload"
],
"inputs": {
"artifacts": {
"pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact"
},
"pipelineparam--automl-tabular-ensemble-unmanaged_container_model": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-unmanaged_container_model"
},
"pipelineparam--model-upload-model": {
"taskOutputArtifact": {
"outputArtifactKey": "model",
"producerTask": "model-upload"
}
}
},
"parameters": {
"pipelineparam--automl-tabular-ensemble-explanation_parameters": {
"componentInputParameter": "pipelineparam--automl-tabular-ensemble-explanation_parameters"
},
"pipelineparam--bool-identity-2-Output": {
"componentInputParameter": "pipelineparam--bool-identity-2-Output"
},
"pipelineparam--bool-identity-Output": {
"componentInputParameter": "pipelineparam--bool-identity-Output"
},
"pipelineparam--dataflow_service_account": {
"componentInputParameter": "pipelineparam--dataflow_service_account"
},
"pipelineparam--dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"pipelineparam--dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"pipelineparam--encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_machine_type"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_max_replica_count"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_starting_replica_count"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_disk_size_gb"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_machine_type"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_max_num_workers"
},
"pipelineparam--location": {
"componentInputParameter": "pipelineparam--location"
},
"pipelineparam--prediction_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"pipelineparam--project": {
"componentInputParameter": "pipelineparam--project"
},
"pipelineparam--root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
},
"pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json": {
"componentInputParameter": "pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json"
},
"pipelineparam--tabular-stats-and-example-gen-test_split_json": {
"componentInputParameter": "pipelineparam--tabular-stats-and-example-gen-test_split_json"
},
"pipelineparam--target_column_name": {
"componentInputParameter": "pipelineparam--target_column_name"
}
}
},
"taskInfo": {
"name": "condition-is-evaluation-3"
},
"triggerPolicy": {
"condition": "inputs.parameters['pipelineparam--bool-identity-Output'].string_value == 'true'"
}
},
"model-upload": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-model-upload"
},
"inputs": {
"artifacts": {
"explanation_metadata_artifact": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact"
},
"unmanaged_container_model": {
"componentInputArtifact": "pipelineparam--automl-tabular-ensemble-unmanaged_container_model"
}
},
"parameters": {
"description": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"display_name": {
"runtimeValue": {
"constantValue": {
"stringValue": "automl-tabular-model-upload-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}"
}
}
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"explanation_metadata": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"explanation_parameters": {
"componentInputParameter": "pipelineparam--automl-tabular-ensemble-explanation_parameters"
},
"labels": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"project": {
"componentInputParameter": "pipelineparam--project"
}
}
},
"taskInfo": {
"name": "model-upload"
}
}
}
},
"inputDefinitions": {
"artifacts": {
"pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"pipelineparam--automl-tabular-ensemble-unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"pipelineparam--automl-tabular-ensemble-explanation_parameters": {
"type": "STRING"
},
"pipelineparam--bool-identity-2-Output": {
"type": "STRING"
},
"pipelineparam--bool-identity-Output": {
"type": "STRING"
},
"pipelineparam--dataflow_service_account": {
"type": "STRING"
},
"pipelineparam--dataflow_subnetwork": {
"type": "STRING"
},
"pipelineparam--dataflow_use_public_ips": {
"type": "STRING"
},
"pipelineparam--encryption_spec_key_name": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"type": "INT"
},
"pipelineparam--location": {
"type": "STRING"
},
"pipelineparam--prediction_type": {
"type": "STRING"
},
"pipelineparam--project": {
"type": "STRING"
},
"pipelineparam--root_dir": {
"type": "STRING"
},
"pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json": {
"type": "STRING"
},
"pipelineparam--tabular-stats-and-example-gen-test_split_json": {
"type": "STRING"
},
"pipelineparam--target_column_name": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"model-evaluation-2-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-exit-handler-1": {
"dag": {
"outputs": {
"artifacts": {
"model-evaluation-2-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-2-evaluation_metrics",
"producerSubtask": "condition-no-distill-2"
}
]
},
"model-evaluation-3-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-3-evaluation_metrics",
"producerSubtask": "condition-is-distill-4"
}
]
},
"model-evaluation-4-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-4-evaluation_metrics",
"producerSubtask": "condition-is-distill-4"
}
]
},
"model-evaluation-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-evaluation_metrics",
"producerSubtask": "condition-no-distill-2"
}
]
}
}
},
"tasks": {
"automl-tabular-cv-trainer": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-cv-trainer"
},
"dependentTasks": [
"automl-tabular-stage-1-tuner",
"automl-tabular-transform",
"merge-materialized-splits",
"tabular-stats-and-example-gen"
],
"inputs": {
"artifacts": {
"materialized_cv_splits": {
"taskOutputArtifact": {
"outputArtifactKey": "splits",
"producerTask": "merge-materialized-splits"
}
},
"metadata": {
"taskOutputArtifact": {
"outputArtifactKey": "metadata",
"producerTask": "tabular-stats-and-example-gen"
}
},
"transform_output": {
"taskOutputArtifact": {
"outputArtifactKey": "transform_output",
"producerTask": "automl-tabular-transform"
}
},
"tuning_result_input": {
"taskOutputArtifact": {
"outputArtifactKey": "tuning_result_output",
"producerTask": "automl-tabular-stage-1-tuner"
}
}
},
"parameters": {
"deadline_hours": {
"componentInputParameter": "pipelineparam--stage_2_deadline_hours"
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"num_parallel_trials": {
"componentInputParameter": "pipelineparam--stage_2_num_parallel_trials"
},
"num_selected_trials": {
"componentInputParameter": "pipelineparam--stage_2_num_selected_trials"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
},
"single_run_max_secs": {
"componentInputParameter": "pipelineparam--stage_2_single_run_max_secs"
},
"worker_pool_specs_override": {
"componentInputParameter": "pipelineparam--cv_trainer_worker_pool_specs_override"
},
"worker_pool_specs_override_json": {
"runtimeValue": {
"constantValue": {
"stringValue": "[]"
}
}
}
}
},
"taskInfo": {
"name": "automl-tabular-cv-trainer"
}
},
"automl-tabular-ensemble": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-ensemble"
},
"dependentTasks": [
"automl-tabular-cv-trainer",
"automl-tabular-transform",
"tabular-stats-and-example-gen"
],
"inputs": {
"artifacts": {
"dataset_schema": {
"taskOutputArtifact": {
"outputArtifactKey": "dataset_schema",
"producerTask": "tabular-stats-and-example-gen"
}
},
"instance_baseline": {
"taskOutputArtifact": {
"outputArtifactKey": "instance_baseline",
"producerTask": "tabular-stats-and-example-gen"
}
},
"metadata": {
"taskOutputArtifact": {
"outputArtifactKey": "metadata",
"producerTask": "tabular-stats-and-example-gen"
}
},
"transform_output": {
"taskOutputArtifact": {
"outputArtifactKey": "transform_output",
"producerTask": "automl-tabular-transform"
}
},
"tuning_result_input": {
"taskOutputArtifact": {
"outputArtifactKey": "tuning_result_output",
"producerTask": "automl-tabular-cv-trainer"
}
},
"warmup_data": {
"taskOutputArtifact": {
"outputArtifactKey": "eval_split",
"producerTask": "tabular-stats-and-example-gen"
}
}
},
"parameters": {
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"export_additional_model_without_custom_ops": {
"componentInputParameter": "pipelineparam--export_additional_model_without_custom_ops"
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
}
}
},
"taskInfo": {
"name": "automl-tabular-ensemble"
}
},
"automl-tabular-infra-validator": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-infra-validator"
},
"dependentTasks": [
"automl-tabular-ensemble"
],
"inputs": {
"artifacts": {
"unmanaged_container_model": {
"taskOutputArtifact": {
"outputArtifactKey": "unmanaged_container_model",
"producerTask": "automl-tabular-ensemble"
}
}
}
},
"taskInfo": {
"name": "automl-tabular-infra-validator"
}
},
"automl-tabular-stage-1-tuner": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-stage-1-tuner"
},
"dependentTasks": [
"automl-tabular-transform",
"tabular-stats-and-example-gen"
],
"inputs": {
"artifacts": {
"materialized_eval_split": {
"taskOutputArtifact": {
"outputArtifactKey": "materialized_eval_split",
"producerTask": "automl-tabular-transform"
}
},
"materialized_train_split": {
"taskOutputArtifact": {
"outputArtifactKey": "materialized_train_split",
"producerTask": "automl-tabular-transform"
}
},
"metadata": {
"taskOutputArtifact": {
"outputArtifactKey": "metadata",
"producerTask": "tabular-stats-and-example-gen"
}
},
"transform_output": {
"taskOutputArtifact": {
"outputArtifactKey": "transform_output",
"producerTask": "automl-tabular-transform"
}
}
},
"parameters": {
"deadline_hours": {
"componentInputParameter": "pipelineparam--stage_1_deadline_hours"
},
"disable_early_stopping": {
"componentInputParameter": "pipelineparam--disable_early_stopping"
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"num_parallel_trials": {
"componentInputParameter": "pipelineparam--stage_1_num_parallel_trials"
},
"num_selected_trials": {
"componentInputParameter": "pipelineparam--stage_1_num_selected_trials"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"reduce_search_space_mode": {
"componentInputParameter": "pipelineparam--reduce_search_space_mode"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
},
"run_distillation": {
"runtimeValue": {
"constantValue": {
"stringValue": "false"
}
}
},
"single_run_max_secs": {
"componentInputParameter": "pipelineparam--stage_1_single_run_max_secs"
},
"study_spec_override": {
"componentInputParameter": "pipelineparam--study_spec_override"
},
"study_spec_parameters_override": {
"runtimeValue": {
"constantValue": {
"stringValue": "[]"
}
}
},
"study_spec_parameters_override_json": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"tune_feature_selection_rate": {
"runtimeValue": {
"constantValue": {
"stringValue": "false"
}
}
},
"worker_pool_specs_override": {
"componentInputParameter": "pipelineparam--stage_1_tuner_worker_pool_specs_override"
},
"worker_pool_specs_override_json": {
"runtimeValue": {
"constantValue": {
"stringValue": "[]"
}
}
}
}
},
"taskInfo": {
"name": "automl-tabular-stage-1-tuner"
}
},
"automl-tabular-transform": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-automl-tabular-transform"
},
"dependentTasks": [
"tabular-stats-and-example-gen"
],
"inputs": {
"artifacts": {
"dataset_schema": {
"taskOutputArtifact": {
"outputArtifactKey": "dataset_schema",
"producerTask": "tabular-stats-and-example-gen"
}
},
"eval_split": {
"taskOutputArtifact": {
"outputArtifactKey": "eval_split",
"producerTask": "tabular-stats-and-example-gen"
}
},
"metadata": {
"taskOutputArtifact": {
"outputArtifactKey": "metadata",
"producerTask": "tabular-stats-and-example-gen"
}
},
"test_split": {
"taskOutputArtifact": {
"outputArtifactKey": "test_split",
"producerTask": "tabular-stats-and-example-gen"
}
},
"train_split": {
"taskOutputArtifact": {
"outputArtifactKey": "train_split",
"producerTask": "tabular-stats-and-example-gen"
}
}
},
"parameters": {
"dataflow_disk_size_gb": {
"componentInputParameter": "pipelineparam--transform_dataflow_disk_size_gb"
},
"dataflow_machine_type": {
"componentInputParameter": "pipelineparam--transform_dataflow_machine_type"
},
"dataflow_max_num_workers": {
"componentInputParameter": "pipelineparam--transform_dataflow_max_num_workers"
},
"dataflow_service_account": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
}
}
},
"taskInfo": {
"name": "automl-tabular-transform"
}
},
"bool-identity": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-bool-identity"
},
"inputs": {
"parameters": {
"value": {
"componentInputParameter": "pipelineparam--run_evaluation"
}
}
},
"taskInfo": {
"name": "bool-identity"
}
},
"bool-identity-2": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-bool-identity-2"
},
"inputs": {
"parameters": {
"value": {
"componentInputParameter": "pipelineparam--run_distillation"
}
}
},
"taskInfo": {
"name": "bool-identity-2"
}
},
"condition-is-distill-4": {
"componentRef": {
"name": "comp-condition-is-distill-4"
},
"dependentTasks": [
"automl-tabular-ensemble",
"automl-tabular-infra-validator",
"bool-identity",
"bool-identity-2",
"tabular-stats-and-example-gen"
],
"inputs": {
"artifacts": {
"pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact": {
"taskOutputArtifact": {
"outputArtifactKey": "explanation_metadata_artifact",
"producerTask": "automl-tabular-ensemble"
}
},
"pipelineparam--automl-tabular-ensemble-unmanaged_container_model": {
"taskOutputArtifact": {
"outputArtifactKey": "unmanaged_container_model",
"producerTask": "automl-tabular-ensemble"
}
},
"pipelineparam--tabular-stats-and-example-gen-dataset_schema": {
"taskOutputArtifact": {
"outputArtifactKey": "dataset_schema",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--tabular-stats-and-example-gen-eval_split": {
"taskOutputArtifact": {
"outputArtifactKey": "eval_split",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--tabular-stats-and-example-gen-instance_baseline": {
"taskOutputArtifact": {
"outputArtifactKey": "instance_baseline",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--tabular-stats-and-example-gen-metadata": {
"taskOutputArtifact": {
"outputArtifactKey": "metadata",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--tabular-stats-and-example-gen-test_split": {
"taskOutputArtifact": {
"outputArtifactKey": "test_split",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--tabular-stats-and-example-gen-train_split": {
"taskOutputArtifact": {
"outputArtifactKey": "train_split",
"producerTask": "tabular-stats-and-example-gen"
}
}
},
"parameters": {
"pipelineparam--automl-tabular-ensemble-explanation_parameters": {
"taskOutputParameter": {
"outputParameterKey": "explanation_parameters",
"producerTask": "automl-tabular-ensemble"
}
},
"pipelineparam--bool-identity-2-Output": {
"taskOutputParameter": {
"outputParameterKey": "Output",
"producerTask": "bool-identity-2"
}
},
"pipelineparam--bool-identity-Output": {
"taskOutputParameter": {
"outputParameterKey": "Output",
"producerTask": "bool-identity"
}
},
"pipelineparam--dataflow_service_account": {
"componentInputParameter": "pipelineparam--dataflow_service_account"
},
"pipelineparam--dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"pipelineparam--dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"pipelineparam--disable_early_stopping": {
"componentInputParameter": "pipelineparam--disable_early_stopping"
},
"pipelineparam--distill_batch_predict_machine_type": {
"componentInputParameter": "pipelineparam--distill_batch_predict_machine_type"
},
"pipelineparam--distill_batch_predict_max_replica_count": {
"componentInputParameter": "pipelineparam--distill_batch_predict_max_replica_count"
},
"pipelineparam--distill_batch_predict_starting_replica_count": {
"componentInputParameter": "pipelineparam--distill_batch_predict_starting_replica_count"
},
"pipelineparam--distill_stage_1_deadline_hours": {
"componentInputParameter": "pipelineparam--distill_stage_1_deadline_hours"
},
"pipelineparam--encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_machine_type"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_max_replica_count"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_starting_replica_count"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_disk_size_gb"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_machine_type"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_max_num_workers"
},
"pipelineparam--export_additional_model_without_custom_ops": {
"componentInputParameter": "pipelineparam--export_additional_model_without_custom_ops"
},
"pipelineparam--location": {
"componentInputParameter": "pipelineparam--location"
},
"pipelineparam--prediction_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"pipelineparam--project": {
"componentInputParameter": "pipelineparam--project"
},
"pipelineparam--reduce_search_space_mode": {
"componentInputParameter": "pipelineparam--reduce_search_space_mode"
},
"pipelineparam--root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
},
"pipelineparam--stage_1_num_parallel_trials": {
"componentInputParameter": "pipelineparam--stage_1_num_parallel_trials"
},
"pipelineparam--stage_1_single_run_max_secs": {
"componentInputParameter": "pipelineparam--stage_1_single_run_max_secs"
},
"pipelineparam--stage_1_tuner_worker_pool_specs_override": {
"componentInputParameter": "pipelineparam--stage_1_tuner_worker_pool_specs_override"
},
"pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json": {
"taskOutputParameter": {
"outputParameterKey": "downsampled_test_split_json",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--tabular-stats-and-example-gen-test_split_json": {
"taskOutputParameter": {
"outputParameterKey": "test_split_json",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--target_column_name": {
"componentInputParameter": "pipelineparam--target_column_name"
},
"pipelineparam--transform_dataflow_disk_size_gb": {
"componentInputParameter": "pipelineparam--transform_dataflow_disk_size_gb"
},
"pipelineparam--transform_dataflow_machine_type": {
"componentInputParameter": "pipelineparam--transform_dataflow_machine_type"
},
"pipelineparam--transform_dataflow_max_num_workers": {
"componentInputParameter": "pipelineparam--transform_dataflow_max_num_workers"
}
}
},
"taskInfo": {
"name": "condition-is-distill-4"
},
"triggerPolicy": {
"condition": "inputs.parameters['pipelineparam--bool-identity-2-Output'].string_value == 'true'"
}
},
"condition-no-distill-2": {
"componentRef": {
"name": "comp-condition-no-distill-2"
},
"dependentTasks": [
"automl-tabular-ensemble",
"automl-tabular-infra-validator",
"bool-identity",
"bool-identity-2",
"tabular-stats-and-example-gen"
],
"inputs": {
"artifacts": {
"pipelineparam--automl-tabular-ensemble-explanation_metadata_artifact": {
"taskOutputArtifact": {
"outputArtifactKey": "explanation_metadata_artifact",
"producerTask": "automl-tabular-ensemble"
}
},
"pipelineparam--automl-tabular-ensemble-unmanaged_container_model": {
"taskOutputArtifact": {
"outputArtifactKey": "unmanaged_container_model",
"producerTask": "automl-tabular-ensemble"
}
}
},
"parameters": {
"pipelineparam--automl-tabular-ensemble-explanation_parameters": {
"taskOutputParameter": {
"outputParameterKey": "explanation_parameters",
"producerTask": "automl-tabular-ensemble"
}
},
"pipelineparam--bool-identity-2-Output": {
"taskOutputParameter": {
"outputParameterKey": "Output",
"producerTask": "bool-identity-2"
}
},
"pipelineparam--bool-identity-Output": {
"taskOutputParameter": {
"outputParameterKey": "Output",
"producerTask": "bool-identity"
}
},
"pipelineparam--dataflow_service_account": {
"componentInputParameter": "pipelineparam--dataflow_service_account"
},
"pipelineparam--dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"pipelineparam--dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"pipelineparam--encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_machine_type"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_max_replica_count"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"componentInputParameter": "pipelineparam--evaluation_batch_predict_starting_replica_count"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_disk_size_gb"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_machine_type"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"componentInputParameter": "pipelineparam--evaluation_dataflow_max_num_workers"
},
"pipelineparam--location": {
"componentInputParameter": "pipelineparam--location"
},
"pipelineparam--prediction_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"pipelineparam--project": {
"componentInputParameter": "pipelineparam--project"
},
"pipelineparam--root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
},
"pipelineparam--tabular-stats-and-example-gen-downsampled_test_split_json": {
"taskOutputParameter": {
"outputParameterKey": "downsampled_test_split_json",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--tabular-stats-and-example-gen-test_split_json": {
"taskOutputParameter": {
"outputParameterKey": "test_split_json",
"producerTask": "tabular-stats-and-example-gen"
}
},
"pipelineparam--target_column_name": {
"componentInputParameter": "pipelineparam--target_column_name"
}
}
},
"taskInfo": {
"name": "condition-no-distill-2"
},
"triggerPolicy": {
"condition": "inputs.parameters['pipelineparam--bool-identity-2-Output'].string_value == 'false'"
}
},
"merge-materialized-splits": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-merge-materialized-splits"
},
"dependentTasks": [
"automl-tabular-transform"
],
"inputs": {
"artifacts": {
"split_0": {
"taskOutputArtifact": {
"outputArtifactKey": "materialized_train_split",
"producerTask": "automl-tabular-transform"
}
},
"split_1": {
"taskOutputArtifact": {
"outputArtifactKey": "materialized_eval_split",
"producerTask": "automl-tabular-transform"
}
}
}
},
"taskInfo": {
"name": "merge-materialized-splits"
}
},
"tabular-stats-and-example-gen": {
"cachingOptions": {
"enableCache": true
},
"componentRef": {
"name": "comp-tabular-stats-and-example-gen"
},
"inputs": {
"parameters": {
"additional_experiments": {
"componentInputParameter": "pipelineparam--additional_experiments"
},
"additional_experiments_json": {
"runtimeValue": {
"constantValue": {
"stringValue": "{}"
}
}
},
"data_source": {
"componentInputParameter": "pipelineparam--data_source"
},
"data_source_bigquery_table_path": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"data_source_csv_filenames": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_disk_size_gb": {
"componentInputParameter": "pipelineparam--stats_and_example_gen_dataflow_disk_size_gb"
},
"dataflow_machine_type": {
"componentInputParameter": "pipelineparam--stats_and_example_gen_dataflow_machine_type"
},
"dataflow_max_num_workers": {
"componentInputParameter": "pipelineparam--stats_and_example_gen_dataflow_max_num_workers"
},
"dataflow_service_account": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"dataflow_subnetwork": {
"componentInputParameter": "pipelineparam--dataflow_subnetwork"
},
"dataflow_use_public_ips": {
"componentInputParameter": "pipelineparam--dataflow_use_public_ips"
},
"encryption_spec_key_name": {
"componentInputParameter": "pipelineparam--encryption_spec_key_name"
},
"location": {
"componentInputParameter": "pipelineparam--location"
},
"optimization_objective": {
"componentInputParameter": "pipelineparam--optimization_objective"
},
"optimization_objective_precision_value": {
"componentInputParameter": "pipelineparam--optimization_objective_precision_value"
},
"optimization_objective_recall_value": {
"componentInputParameter": "pipelineparam--optimization_objective_recall_value"
},
"predefined_split_key": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"prediction_type": {
"componentInputParameter": "pipelineparam--prediction_type"
},
"project": {
"componentInputParameter": "pipelineparam--project"
},
"request_type": {
"runtimeValue": {
"constantValue": {
"stringValue": "COLUMN_STATS_ONLY"
}
}
},
"root_dir": {
"componentInputParameter": "pipelineparam--root_dir"
},
"run_distillation": {
"componentInputParameter": "pipelineparam--run_distillation"
},
"split_spec": {
"componentInputParameter": "pipelineparam--split_spec"
},
"stratified_split_key": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"target_column_name": {
"componentInputParameter": "pipelineparam--target_column_name"
},
"test_fraction": {
"runtimeValue": {
"constantValue": {
"stringValue": "-1"
}
}
},
"timestamp_split_key": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"training_fraction": {
"runtimeValue": {
"constantValue": {
"stringValue": "-1"
}
}
},
"transformations": {
"componentInputParameter": "pipelineparam--transformations"
},
"transformations_path": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"validation_fraction": {
"runtimeValue": {
"constantValue": {
"stringValue": "-1"
}
}
},
"weight_column_name": {
"componentInputParameter": "pipelineparam--weight_column_name"
}
}
},
"taskInfo": {
"name": "tabular-stats-and-example-gen"
}
}
}
},
"inputDefinitions": {
"parameters": {
"pipelineparam--additional_experiments": {
"type": "STRING"
},
"pipelineparam--cv_trainer_worker_pool_specs_override": {
"type": "STRING"
},
"pipelineparam--data_source": {
"type": "STRING"
},
"pipelineparam--dataflow_service_account": {
"type": "STRING"
},
"pipelineparam--dataflow_subnetwork": {
"type": "STRING"
},
"pipelineparam--dataflow_use_public_ips": {
"type": "STRING"
},
"pipelineparam--disable_early_stopping": {
"type": "STRING"
},
"pipelineparam--distill_batch_predict_machine_type": {
"type": "STRING"
},
"pipelineparam--distill_batch_predict_max_replica_count": {
"type": "INT"
},
"pipelineparam--distill_batch_predict_starting_replica_count": {
"type": "INT"
},
"pipelineparam--distill_stage_1_deadline_hours": {
"type": "DOUBLE"
},
"pipelineparam--encryption_spec_key_name": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"type": "INT"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"type": "STRING"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"type": "INT"
},
"pipelineparam--export_additional_model_without_custom_ops": {
"type": "STRING"
},
"pipelineparam--location": {
"type": "STRING"
},
"pipelineparam--optimization_objective": {
"type": "STRING"
},
"pipelineparam--optimization_objective_precision_value": {
"type": "DOUBLE"
},
"pipelineparam--optimization_objective_recall_value": {
"type": "DOUBLE"
},
"pipelineparam--prediction_type": {
"type": "STRING"
},
"pipelineparam--project": {
"type": "STRING"
},
"pipelineparam--reduce_search_space_mode": {
"type": "STRING"
},
"pipelineparam--root_dir": {
"type": "STRING"
},
"pipelineparam--run_distillation": {
"type": "STRING"
},
"pipelineparam--run_evaluation": {
"type": "STRING"
},
"pipelineparam--split_spec": {
"type": "STRING"
},
"pipelineparam--stage_1_deadline_hours": {
"type": "DOUBLE"
},
"pipelineparam--stage_1_num_parallel_trials": {
"type": "INT"
},
"pipelineparam--stage_1_num_selected_trials": {
"type": "INT"
},
"pipelineparam--stage_1_single_run_max_secs": {
"type": "INT"
},
"pipelineparam--stage_1_tuner_worker_pool_specs_override": {
"type": "STRING"
},
"pipelineparam--stage_2_deadline_hours": {
"type": "DOUBLE"
},
"pipelineparam--stage_2_num_parallel_trials": {
"type": "INT"
},
"pipelineparam--stage_2_num_selected_trials": {
"type": "INT"
},
"pipelineparam--stage_2_single_run_max_secs": {
"type": "INT"
},
"pipelineparam--stats_and_example_gen_dataflow_disk_size_gb": {
"type": "INT"
},
"pipelineparam--stats_and_example_gen_dataflow_machine_type": {
"type": "STRING"
},
"pipelineparam--stats_and_example_gen_dataflow_max_num_workers": {
"type": "INT"
},
"pipelineparam--study_spec_override": {
"type": "STRING"
},
"pipelineparam--target_column_name": {
"type": "STRING"
},
"pipelineparam--transform_dataflow_disk_size_gb": {
"type": "INT"
},
"pipelineparam--transform_dataflow_machine_type": {
"type": "STRING"
},
"pipelineparam--transform_dataflow_max_num_workers": {
"type": "INT"
},
"pipelineparam--transformations": {
"type": "STRING"
},
"pipelineparam--weight_column_name": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"model-evaluation-2-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-3-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-4-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-merge-materialized-splits": {
"executorLabel": "exec-merge-materialized-splits",
"inputDefinitions": {
"artifacts": {
"split_0": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"split_1": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
}
},
"outputDefinitions": {
"artifacts": {
"splits": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-model-batch-explanation": {
"executorLabel": "exec-model-batch-explanation",
"inputDefinitions": {
"artifacts": {
"explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"accelerator_count": {
"type": "INT"
},
"accelerator_type": {
"type": "STRING"
},
"bigquery_destination_output_uri": {
"type": "STRING"
},
"bigquery_source_input_uri": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"gcs_destination_output_uri_prefix": {
"type": "STRING"
},
"gcs_source_uris": {
"type": "STRING"
},
"generate_explanation": {
"type": "STRING"
},
"instances_format": {
"type": "STRING"
},
"job_display_name": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"machine_type": {
"type": "STRING"
},
"manual_batch_tuning_parameters_batch_size": {
"type": "INT"
},
"max_replica_count": {
"type": "INT"
},
"model_parameters": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"starting_replica_count": {
"type": "INT"
}
}
},
"outputDefinitions": {
"artifacts": {
"batchpredictionjob": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
},
"bigquery_output_table": {
"artifactType": {
"schemaTitle": "google.BQTable",
"schemaVersion": "0.0.1"
}
},
"gcs_output_directory": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-batch-explanation-2": {
"executorLabel": "exec-model-batch-explanation-2",
"inputDefinitions": {
"artifacts": {
"explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"accelerator_count": {
"type": "INT"
},
"accelerator_type": {
"type": "STRING"
},
"bigquery_destination_output_uri": {
"type": "STRING"
},
"bigquery_source_input_uri": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"gcs_destination_output_uri_prefix": {
"type": "STRING"
},
"gcs_source_uris": {
"type": "STRING"
},
"generate_explanation": {
"type": "STRING"
},
"instances_format": {
"type": "STRING"
},
"job_display_name": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"machine_type": {
"type": "STRING"
},
"manual_batch_tuning_parameters_batch_size": {
"type": "INT"
},
"max_replica_count": {
"type": "INT"
},
"model_parameters": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"starting_replica_count": {
"type": "INT"
}
}
},
"outputDefinitions": {
"artifacts": {
"batchpredictionjob": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
},
"bigquery_output_table": {
"artifactType": {
"schemaTitle": "google.BQTable",
"schemaVersion": "0.0.1"
}
},
"gcs_output_directory": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-batch-predict": {
"executorLabel": "exec-model-batch-predict",
"inputDefinitions": {
"artifacts": {
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"accelerator_count": {
"type": "INT"
},
"accelerator_type": {
"type": "STRING"
},
"bigquery_destination_output_uri": {
"type": "STRING"
},
"bigquery_source_input_uri": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"gcs_destination_output_uri_prefix": {
"type": "STRING"
},
"gcs_source_uris": {
"type": "STRING"
},
"generate_explanation": {
"type": "STRING"
},
"instances_format": {
"type": "STRING"
},
"job_display_name": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"machine_type": {
"type": "STRING"
},
"manual_batch_tuning_parameters_batch_size": {
"type": "INT"
},
"max_replica_count": {
"type": "INT"
},
"model_parameters": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"starting_replica_count": {
"type": "INT"
}
}
},
"outputDefinitions": {
"artifacts": {
"batchpredictionjob": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
},
"bigquery_output_table": {
"artifactType": {
"schemaTitle": "google.BQTable",
"schemaVersion": "0.0.1"
}
},
"gcs_output_directory": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-batch-predict-2": {
"executorLabel": "exec-model-batch-predict-2",
"inputDefinitions": {
"artifacts": {
"model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"accelerator_count": {
"type": "INT"
},
"accelerator_type": {
"type": "STRING"
},
"bigquery_destination_output_uri": {
"type": "STRING"
},
"bigquery_source_input_uri": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"gcs_destination_output_uri_prefix": {
"type": "STRING"
},
"gcs_source_uris": {
"type": "STRING"
},
"generate_explanation": {
"type": "STRING"
},
"instances_format": {
"type": "STRING"
},
"job_display_name": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"machine_type": {
"type": "STRING"
},
"manual_batch_tuning_parameters_batch_size": {
"type": "INT"
},
"max_replica_count": {
"type": "INT"
},
"model_parameters": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"starting_replica_count": {
"type": "INT"
}
}
},
"outputDefinitions": {
"artifacts": {
"batchpredictionjob": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
},
"bigquery_output_table": {
"artifactType": {
"schemaTitle": "google.BQTable",
"schemaVersion": "0.0.1"
}
},
"gcs_output_directory": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-batch-predict-3": {
"executorLabel": "exec-model-batch-predict-3",
"inputDefinitions": {
"artifacts": {
"model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"accelerator_count": {
"type": "INT"
},
"accelerator_type": {
"type": "STRING"
},
"bigquery_destination_output_uri": {
"type": "STRING"
},
"bigquery_source_input_uri": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"gcs_destination_output_uri_prefix": {
"type": "STRING"
},
"gcs_source_uris": {
"type": "STRING"
},
"generate_explanation": {
"type": "STRING"
},
"instances_format": {
"type": "STRING"
},
"job_display_name": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"machine_type": {
"type": "STRING"
},
"manual_batch_tuning_parameters_batch_size": {
"type": "INT"
},
"max_replica_count": {
"type": "INT"
},
"model_parameters": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"starting_replica_count": {
"type": "INT"
}
}
},
"outputDefinitions": {
"artifacts": {
"batchpredictionjob": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
},
"bigquery_output_table": {
"artifactType": {
"schemaTitle": "google.BQTable",
"schemaVersion": "0.0.1"
}
},
"gcs_output_directory": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-batch-predict-4": {
"executorLabel": "exec-model-batch-predict-4",
"inputDefinitions": {
"artifacts": {
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"accelerator_count": {
"type": "INT"
},
"accelerator_type": {
"type": "STRING"
},
"bigquery_destination_output_uri": {
"type": "STRING"
},
"bigquery_source_input_uri": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"gcs_destination_output_uri_prefix": {
"type": "STRING"
},
"gcs_source_uris": {
"type": "STRING"
},
"generate_explanation": {
"type": "STRING"
},
"instances_format": {
"type": "STRING"
},
"job_display_name": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"machine_type": {
"type": "STRING"
},
"manual_batch_tuning_parameters_batch_size": {
"type": "INT"
},
"max_replica_count": {
"type": "INT"
},
"model_parameters": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"starting_replica_count": {
"type": "INT"
}
}
},
"outputDefinitions": {
"artifacts": {
"batchpredictionjob": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
},
"bigquery_output_table": {
"artifactType": {
"schemaTitle": "google.BQTable",
"schemaVersion": "0.0.1"
}
},
"gcs_output_directory": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-evaluation": {
"executorLabel": "exec-model-evaluation",
"inputDefinitions": {
"artifacts": {
"batch_prediction_job": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"class_names": {
"type": "STRING"
},
"classification_type": {
"type": "STRING"
},
"dataflow_disk_size": {
"type": "INT"
},
"dataflow_machine_type": {
"type": "STRING"
},
"dataflow_max_workers_num": {
"type": "INT"
},
"dataflow_service_account": {
"type": "STRING"
},
"dataflow_subnetwork": {
"type": "STRING"
},
"dataflow_use_public_ips": {
"type": "STRING"
},
"dataflow_workers_num": {
"type": "INT"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"example_weight_column": {
"type": "STRING"
},
"generate_feature_attribution": {
"type": "STRING"
},
"ground_truth_column": {
"type": "STRING"
},
"ground_truth_format": {
"type": "STRING"
},
"ground_truth_gcs_source": {
"type": "STRING"
},
"key_columns": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"positive_classes": {
"type": "STRING"
},
"prediction_id_column": {
"type": "STRING"
},
"prediction_label_column": {
"type": "STRING"
},
"prediction_score_column": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"problem_type": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-evaluation-2": {
"executorLabel": "exec-model-evaluation-2",
"inputDefinitions": {
"artifacts": {
"batch_prediction_job": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"class_names": {
"type": "STRING"
},
"classification_type": {
"type": "STRING"
},
"dataflow_disk_size": {
"type": "INT"
},
"dataflow_machine_type": {
"type": "STRING"
},
"dataflow_max_workers_num": {
"type": "INT"
},
"dataflow_service_account": {
"type": "STRING"
},
"dataflow_subnetwork": {
"type": "STRING"
},
"dataflow_use_public_ips": {
"type": "STRING"
},
"dataflow_workers_num": {
"type": "INT"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"example_weight_column": {
"type": "STRING"
},
"generate_feature_attribution": {
"type": "STRING"
},
"ground_truth_column": {
"type": "STRING"
},
"ground_truth_format": {
"type": "STRING"
},
"ground_truth_gcs_source": {
"type": "STRING"
},
"key_columns": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"positive_classes": {
"type": "STRING"
},
"prediction_id_column": {
"type": "STRING"
},
"prediction_label_column": {
"type": "STRING"
},
"prediction_score_column": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"problem_type": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-evaluation-3": {
"executorLabel": "exec-model-evaluation-3",
"inputDefinitions": {
"artifacts": {
"batch_prediction_job": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"class_names": {
"type": "STRING"
},
"classification_type": {
"type": "STRING"
},
"dataflow_disk_size": {
"type": "INT"
},
"dataflow_machine_type": {
"type": "STRING"
},
"dataflow_max_workers_num": {
"type": "INT"
},
"dataflow_service_account": {
"type": "STRING"
},
"dataflow_subnetwork": {
"type": "STRING"
},
"dataflow_use_public_ips": {
"type": "STRING"
},
"dataflow_workers_num": {
"type": "INT"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"example_weight_column": {
"type": "STRING"
},
"generate_feature_attribution": {
"type": "STRING"
},
"ground_truth_column": {
"type": "STRING"
},
"ground_truth_format": {
"type": "STRING"
},
"ground_truth_gcs_source": {
"type": "STRING"
},
"key_columns": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"positive_classes": {
"type": "STRING"
},
"prediction_id_column": {
"type": "STRING"
},
"prediction_label_column": {
"type": "STRING"
},
"prediction_score_column": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"problem_type": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-evaluation-4": {
"executorLabel": "exec-model-evaluation-4",
"inputDefinitions": {
"artifacts": {
"batch_prediction_job": {
"artifactType": {
"schemaTitle": "google.VertexBatchPredictionJob",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"class_names": {
"type": "STRING"
},
"classification_type": {
"type": "STRING"
},
"dataflow_disk_size": {
"type": "INT"
},
"dataflow_machine_type": {
"type": "STRING"
},
"dataflow_max_workers_num": {
"type": "INT"
},
"dataflow_service_account": {
"type": "STRING"
},
"dataflow_subnetwork": {
"type": "STRING"
},
"dataflow_use_public_ips": {
"type": "STRING"
},
"dataflow_workers_num": {
"type": "INT"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"example_weight_column": {
"type": "STRING"
},
"generate_feature_attribution": {
"type": "STRING"
},
"ground_truth_column": {
"type": "STRING"
},
"ground_truth_format": {
"type": "STRING"
},
"ground_truth_gcs_source": {
"type": "STRING"
},
"key_columns": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"positive_classes": {
"type": "STRING"
},
"prediction_id_column": {
"type": "STRING"
},
"prediction_label_column": {
"type": "STRING"
},
"prediction_score_column": {
"type": "STRING"
},
"predictions_format": {
"type": "STRING"
},
"problem_type": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-evaluation-import": {
"executorLabel": "exec-model-evaluation-import",
"inputDefinitions": {
"artifacts": {
"explanation": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"dataset_path": {
"type": "STRING"
},
"dataset_paths": {
"type": "STRING"
},
"dataset_type": {
"type": "STRING"
},
"display_name": {
"type": "STRING"
},
"problem_type": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-evaluation-import-2": {
"executorLabel": "exec-model-evaluation-import-2",
"inputDefinitions": {
"artifacts": {
"explanation": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"dataset_path": {
"type": "STRING"
},
"dataset_paths": {
"type": "STRING"
},
"dataset_type": {
"type": "STRING"
},
"display_name": {
"type": "STRING"
},
"problem_type": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-upload": {
"executorLabel": "exec-model-upload",
"inputDefinitions": {
"artifacts": {
"explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"description": {
"type": "STRING"
},
"display_name": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"project": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-upload-2": {
"executorLabel": "exec-model-upload-2",
"inputDefinitions": {
"artifacts": {
"explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"description": {
"type": "STRING"
},
"display_name": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"project": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-model-upload-3": {
"executorLabel": "exec-model-upload-3",
"inputDefinitions": {
"artifacts": {
"explanation_metadata_artifact": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"unmanaged_container_model": {
"artifactType": {
"schemaTitle": "google.UnmanagedContainerModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"description": {
"type": "STRING"
},
"display_name": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"explanation_metadata": {
"type": "STRING"
},
"explanation_parameters": {
"type": "STRING"
},
"labels": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"project": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"model": {
"artifactType": {
"schemaTitle": "google.VertexModel",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"gcp_resources": {
"type": "STRING"
}
}
}
},
"comp-read-input-uri": {
"executorLabel": "exec-read-input-uri",
"inputDefinitions": {
"artifacts": {
"split_uri": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
}
},
"outputDefinitions": {
"parameters": {
"Output": {
"type": "STRING"
}
}
}
},
"comp-read-input-uri-2": {
"executorLabel": "exec-read-input-uri-2",
"inputDefinitions": {
"artifacts": {
"split_uri": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
}
},
"outputDefinitions": {
"parameters": {
"Output": {
"type": "STRING"
}
}
}
},
"comp-set-model-can-skip-validation": {
"executorLabel": "exec-set-model-can-skip-validation",
"inputDefinitions": {
"artifacts": {
"model": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-tabular-stats-and-example-gen": {
"executorLabel": "exec-tabular-stats-and-example-gen",
"inputDefinitions": {
"parameters": {
"additional_experiments": {
"type": "STRING"
},
"additional_experiments_json": {
"type": "STRING"
},
"data_source": {
"type": "STRING"
},
"data_source_bigquery_table_path": {
"type": "STRING"
},
"data_source_csv_filenames": {
"type": "STRING"
},
"dataflow_disk_size_gb": {
"type": "INT"
},
"dataflow_machine_type": {
"type": "STRING"
},
"dataflow_max_num_workers": {
"type": "INT"
},
"dataflow_service_account": {
"type": "STRING"
},
"dataflow_subnetwork": {
"type": "STRING"
},
"dataflow_use_public_ips": {
"type": "STRING"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"optimization_objective": {
"type": "STRING"
},
"optimization_objective_precision_value": {
"type": "DOUBLE"
},
"optimization_objective_recall_value": {
"type": "DOUBLE"
},
"predefined_split_key": {
"type": "STRING"
},
"prediction_type": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"request_type": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
},
"run_distillation": {
"type": "STRING"
},
"split_spec": {
"type": "STRING"
},
"stratified_split_key": {
"type": "STRING"
},
"target_column_name": {
"type": "STRING"
},
"test_fraction": {
"type": "DOUBLE"
},
"timestamp_split_key": {
"type": "STRING"
},
"training_fraction": {
"type": "DOUBLE"
},
"transformations": {
"type": "STRING"
},
"transformations_path": {
"type": "STRING"
},
"validation_fraction": {
"type": "DOUBLE"
},
"weight_column_name": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"dataset_schema": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"dataset_stats": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"eval_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
},
"instance_baseline": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"metadata": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
},
"test_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
},
"train_split": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
},
"parameters": {
"downsampled_test_split_json": {
"type": "STRING"
},
"gcp_resources": {
"type": "STRING"
},
"test_split_json": {
"type": "STRING"
}
}
}
},
"comp-write-bp-result-path": {
"executorLabel": "exec-write-bp-result-path",
"inputDefinitions": {
"artifacts": {
"bp_job": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
}
},
"outputDefinitions": {
"artifacts": {
"result": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
}
}
},
"comp-write-bp-result-path-2": {
"executorLabel": "exec-write-bp-result-path-2",
"inputDefinitions": {
"artifacts": {
"bp_job": {
"artifactType": {
"schemaTitle": "system.Artifact",
"schemaVersion": "0.0.1"
}
}
}
},
"outputDefinitions": {
"artifacts": {
"result": {
"artifactType": {
"schemaTitle": "system.Dataset",
"schemaVersion": "0.0.1"
}
}
}
}
}
},
"deploymentSpec": {
"executors": {
"exec-automl-tabular-cv-trainer": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"automl-tabular-cv-tuner-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"l2l_cv_tuner\", \"--transform_output_path={{$.inputs.artifacts['transform_output'].uri}}\", \"--training_docker_uri=us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\", \"--worker_pool_specs_override={{$.inputs.parameters['worker_pool_specs_override']}}\", \"--num_parallel_trial={{$.inputs.parameters['num_parallel_trials']}}\", \"--single_run_max_secs={{$.inputs.parameters['single_run_max_secs']}}\", \"--deadline_hours={{$.inputs.parameters['deadline_hours']}}\", \"--valid_trials_completed_threshold=0.7\", \"--num_selected_trials={{$.inputs.parameters['num_selected_trials']}}\", \"--lro_job_info={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/lro\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--metadata_path={{$.inputs.artifacts['metadata'].uri}}\", \"--materialized_cv_splits={{$.inputs.artifacts['materialized_cv_splits'].uri}}\", \"--tuning_result_input_path={{$.inputs.artifacts['tuning_result_input'].uri}}\", \"--tuning_result_output_path={{$.outputs.artifacts['tuning_result_output'].uri}}\", \"--kms_key_name={{$.inputs.parameters['encryption_spec_key_name']}}\", \"--use_custom_job=true\", \"--use_json=true\", \"--log_level=ERROR\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-automl-tabular-ensemble": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"automl-tabular-ensemble-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-highmem-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"ensemble\", \"--transform_output_path={{$.inputs.artifacts['transform_output'].uri}}\", \"--model_output_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/model\", \"--custom_model_output_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/custom_model\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--export_custom_model={{$.inputs.parameters['export_additional_model_without_custom_ops']}}\", \"--metadata_path={{$.inputs.artifacts['metadata'].uri}}\", \"--dataset_schema_path={{$.inputs.artifacts['dataset_schema'].uri}}\", \"--tuning_result_input_path={{$.inputs.artifacts['tuning_result_input'].uri}}\", \"--instance_baseline_path={{$.inputs.artifacts['instance_baseline'].uri}}\", \"--warmup_data={{$.inputs.artifacts['warmup_data'].uri}}\", \"--prediction_docker_uri=us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:prod\", \"--model_path={{$.outputs.artifacts['model'].uri}}\", \"--custom_model_path={{$.outputs.artifacts['model_without_custom_ops'].uri}}\", \"--explanation_metadata_path={{$.outputs.parameters['explanation_metadata'].output_file}},{{$.outputs.artifacts['explanation_metadata_artifact'].uri}}\", \"--explanation_parameters_path={{$.outputs.parameters['explanation_parameters'].output_file}}\", \"--model_architecture_path={{$.outputs.artifacts['model_architecture'].uri}}\", \"--use_json=true\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-automl-tabular-ensemble-2": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"automl-tabular-ensemble-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-highmem-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"ensemble\", \"--transform_output_path={{$.inputs.artifacts['transform_output'].uri}}\", \"--model_output_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/model\", \"--custom_model_output_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/custom_model\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--export_custom_model={{$.inputs.parameters['export_additional_model_without_custom_ops']}}\", \"--metadata_path={{$.inputs.artifacts['metadata'].uri}}\", \"--dataset_schema_path={{$.inputs.artifacts['dataset_schema'].uri}}\", \"--tuning_result_input_path={{$.inputs.artifacts['tuning_result_input'].uri}}\", \"--instance_baseline_path={{$.inputs.artifacts['instance_baseline'].uri}}\", \"--warmup_data={{$.inputs.artifacts['warmup_data'].uri}}\", \"--prediction_docker_uri=us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:prod\", \"--model_path={{$.outputs.artifacts['model'].uri}}\", \"--custom_model_path={{$.outputs.artifacts['model_without_custom_ops'].uri}}\", \"--explanation_metadata_path={{$.outputs.parameters['explanation_metadata'].output_file}},{{$.outputs.artifacts['explanation_metadata_artifact'].uri}}\", \"--explanation_parameters_path={{$.outputs.parameters['explanation_parameters'].output_file}}\", \"--model_architecture_path={{$.outputs.artifacts['model_architecture'].uri}}\", \"--use_json=true\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-automl-tabular-finalizer": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"automl-tabular-finalizer-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"cancel_l2l_tuner\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--cleanup_lro_job_infos={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/lro\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-automl-tabular-infra-validator": {
"container": {
"args": [
"--executor_input",
"{{$}}"
],
"image": "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:prod",
"resources": {
"cpuLimit": 8.0,
"memoryLimit": 52.0
}
}
},
"exec-automl-tabular-infra-validator-2": {
"container": {
"args": [
"--executor_input",
"{{$}}"
],
"image": "us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:prod",
"resources": {
"cpuLimit": 8.0,
"memoryLimit": 52.0
}
}
},
"exec-automl-tabular-stage-1-tuner": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"automl-tabular-stage-1-tuner-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"l2l_stage_1_tuner\", \"--transform_output_path={{$.inputs.artifacts['transform_output'].uri}}\", \"--training_docker_uri=us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"--disable_early_stopping={{$.inputs.parameters['disable_early_stopping']}}\", \"--tune_feature_selection_rate={{$.inputs.parameters['tune_feature_selection_rate']}}\", \"--reduce_search_space_mode={{$.inputs.parameters['reduce_search_space_mode']}}\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\", \"--study_spec_override={{$.inputs.parameters['study_spec_override']}}\", \"--worker_pool_specs_override={{$.inputs.parameters['worker_pool_specs_override']}}\", \"--num_parallel_trial={{$.inputs.parameters['num_parallel_trials']}}\", \"--single_run_max_secs={{$.inputs.parameters['single_run_max_secs']}}\", \"--deadline_hours={{$.inputs.parameters['deadline_hours']}}\", \"--num_selected_trials={{$.inputs.parameters['num_selected_trials']}}\", \"--lro_job_info={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/lro\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--metadata_path={{$.inputs.artifacts['metadata'].uri}}\", \"--materialized_train_split={{$.inputs.artifacts['materialized_train_split'].uri}}\", \"--materialized_eval_split={{$.inputs.artifacts['materialized_eval_split'].uri}}\", \"--is_distill={{$.inputs.parameters['run_distillation']}}\", \"--tuning_result_output_path={{$.outputs.artifacts['tuning_result_output'].uri}}\", \"--kms_key_name={{$.inputs.parameters['encryption_spec_key_name']}}\", \"--use_json=true\", \"--log_level=ERROR\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-automl-tabular-stage-1-tuner-2": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"automl-tabular-stage-1-tuner-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"l2l_stage_1_tuner\", \"--transform_output_path={{$.inputs.artifacts['transform_output'].uri}}\", \"--training_docker_uri=us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"--disable_early_stopping={{$.inputs.parameters['disable_early_stopping']}}\", \"--tune_feature_selection_rate={{$.inputs.parameters['tune_feature_selection_rate']}}\", \"--reduce_search_space_mode={{$.inputs.parameters['reduce_search_space_mode']}}\", \"--component_id={{$.pipeline_task_uuid}}\", \"--training_base_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/train\", \"--study_spec_override={{$.inputs.parameters['study_spec_override']}}\", \"--worker_pool_specs_override={{$.inputs.parameters['worker_pool_specs_override']}}\", \"--num_parallel_trial={{$.inputs.parameters['num_parallel_trials']}}\", \"--single_run_max_secs={{$.inputs.parameters['single_run_max_secs']}}\", \"--deadline_hours={{$.inputs.parameters['deadline_hours']}}\", \"--num_selected_trials={{$.inputs.parameters['num_selected_trials']}}\", \"--lro_job_info={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/lro\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--metadata_path={{$.inputs.artifacts['metadata'].uri}}\", \"--materialized_train_split={{$.inputs.artifacts['materialized_train_split'].uri}}\", \"--materialized_eval_split={{$.inputs.artifacts['materialized_eval_split'].uri}}\", \"--is_distill={{$.inputs.parameters['run_distillation']}}\", \"--tuning_result_output_path={{$.outputs.artifacts['tuning_result_output'].uri}}\", \"--kms_key_name={{$.inputs.parameters['encryption_spec_key_name']}}\", \"--use_json=true\", \"--log_level=ERROR\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-automl-tabular-transform": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"automl-tabular-transform-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"transform\", \"--transform_output_artifact_path={{$.outputs.artifacts['transform_output'].uri}}\", \"--transform_output_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform\", \"--materialized_splits_output_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform_materialized\", \"--metadata_path={{$.inputs.artifacts['metadata'].uri}}\", \"--dataset_schema_path={{$.inputs.artifacts['dataset_schema'].uri}}\", \"--train_split={{$.inputs.artifacts['train_split'].uri}}\", \"--eval_split={{$.inputs.artifacts['eval_split'].uri}}\", \"--test_split={{$.inputs.artifacts['test_split'].uri}}\", \"--materialized_train_split={{$.outputs.artifacts['materialized_train_split'].uri}}\", \"--materialized_eval_split={{$.outputs.artifacts['materialized_eval_split'].uri}}\", \"--materialized_test_split={{$.outputs.artifacts['materialized_test_split'].uri}}\", \"--training_schema_path={{$.outputs.artifacts['training_schema_uri'].uri}}\", \"--job_name=automl-tabular-transform-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"--dataflow_project={{$.inputs.parameters['project']}}\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--dataflow_staging_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging\", \"--dataflow_tmp_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\", \"--dataflow_max_num_workers={{$.inputs.parameters['dataflow_max_num_workers']}}\", \"--dataflow_machine_type={{$.inputs.parameters['dataflow_machine_type']}}\", \"--dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:prod\", \"--dataflow_disk_size_gb={{$.inputs.parameters['dataflow_disk_size_gb']}}\", \"--dataflow_subnetwork_fully_qualified={{$.inputs.parameters['dataflow_subnetwork']}}\", \"--dataflow_use_public_ips={{$.inputs.parameters['dataflow_use_public_ips']}}\", \"--dataflow_kms_key={{$.inputs.parameters['encryption_spec_key_name']}}\", \"--dataflow_service_account={{$.inputs.parameters['dataflow_service_account']}}\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-automl-tabular-transform-2": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"automl-tabular-transform-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"transform\", \"--transform_output_artifact_path={{$.outputs.artifacts['transform_output'].uri}}\", \"--transform_output_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform\", \"--materialized_splits_output_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform_materialized\", \"--metadata_path={{$.inputs.artifacts['metadata'].uri}}\", \"--dataset_schema_path={{$.inputs.artifacts['dataset_schema'].uri}}\", \"--train_split={{$.inputs.artifacts['train_split'].uri}}\", \"--eval_split={{$.inputs.artifacts['eval_split'].uri}}\", \"--test_split={{$.inputs.artifacts['test_split'].uri}}\", \"--materialized_train_split={{$.outputs.artifacts['materialized_train_split'].uri}}\", \"--materialized_eval_split={{$.outputs.artifacts['materialized_eval_split'].uri}}\", \"--materialized_test_split={{$.outputs.artifacts['materialized_test_split'].uri}}\", \"--training_schema_path={{$.outputs.artifacts['training_schema_uri'].uri}}\", \"--job_name=automl-tabular-transform-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"--dataflow_project={{$.inputs.parameters['project']}}\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--dataflow_staging_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging\", \"--dataflow_tmp_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\", \"--dataflow_max_num_workers={{$.inputs.parameters['dataflow_max_num_workers']}}\", \"--dataflow_machine_type={{$.inputs.parameters['dataflow_machine_type']}}\", \"--dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:prod\", \"--dataflow_disk_size_gb={{$.inputs.parameters['dataflow_disk_size_gb']}}\", \"--dataflow_subnetwork_fully_qualified={{$.inputs.parameters['dataflow_subnetwork']}}\", \"--dataflow_use_public_ips={{$.inputs.parameters['dataflow_use_public_ips']}}\", \"--dataflow_kms_key={{$.inputs.parameters['encryption_spec_key_name']}}\", \"--dataflow_service_account={{$.inputs.parameters['dataflow_service_account']}}\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-bool-identity": {
"container": {
"args": [
"--value",
"{{$.inputs.parameters['value']}}",
"----output-paths",
"{{$.outputs.parameters['Output'].output_file}}"
],
"command": [
"sh",
"-ec",
"program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n",
"def _bool_identity(value):\n \"\"\"Returns boolean value.\n\n Args:\n value: Boolean value to return\n\n Returns:\n Boolean value.\n \"\"\"\n return 'true' if value else 'false'\n\ndef _deserialize_bool(s) -> bool:\n from distutils.util import strtobool\n return strtobool(s) == 1\n\ndef _serialize_str(str_value: str) -> str:\n if not isinstance(str_value, str):\n raise TypeError('Value \"{}\" has type \"{}\" instead of str.'.format(\n str(str_value), str(type(str_value))))\n return str_value\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Bool identity', description='Returns boolean value.')\n_parser.add_argument(\"--value\", dest=\"value\", type=_deserialize_bool, required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"----output-paths\", dest=\"_output_paths\", type=str, nargs=1)\n_parsed_args = vars(_parser.parse_args())\n_output_files = _parsed_args.pop(\"_output_paths\", [])\n\n_outputs = _bool_identity(**_parsed_args)\n\n_outputs = [_outputs]\n\n_output_serializers = [\n _serialize_str,\n\n]\n\nimport os\nfor idx, output_file in enumerate(_output_files):\n try:\n os.makedirs(os.path.dirname(output_file))\n except OSError:\n pass\n with open(output_file, 'w') as f:\n f.write(_output_serializers[idx](_outputs[idx]))\n"
],
"image": "python:3.7-slim"
}
},
"exec-bool-identity-2": {
"container": {
"args": [
"--value",
"{{$.inputs.parameters['value']}}",
"----output-paths",
"{{$.outputs.parameters['Output'].output_file}}"
],
"command": [
"sh",
"-ec",
"program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n",
"def _bool_identity(value):\n \"\"\"Returns boolean value.\n\n Args:\n value: Boolean value to return\n\n Returns:\n Boolean value.\n \"\"\"\n return 'true' if value else 'false'\n\ndef _deserialize_bool(s) -> bool:\n from distutils.util import strtobool\n return strtobool(s) == 1\n\ndef _serialize_str(str_value: str) -> str:\n if not isinstance(str_value, str):\n raise TypeError('Value \"{}\" has type \"{}\" instead of str.'.format(\n str(str_value), str(type(str_value))))\n return str_value\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Bool identity', description='Returns boolean value.')\n_parser.add_argument(\"--value\", dest=\"value\", type=_deserialize_bool, required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"----output-paths\", dest=\"_output_paths\", type=str, nargs=1)\n_parsed_args = vars(_parser.parse_args())\n_output_files = _parsed_args.pop(\"_output_paths\", [])\n\n_outputs = _bool_identity(**_parsed_args)\n\n_outputs = [_outputs]\n\n_output_serializers = [\n _serialize_str,\n\n]\n\nimport os\nfor idx, output_file in enumerate(_output_files):\n try:\n os.makedirs(os.path.dirname(output_file))\n except OSError:\n pass\n with open(output_file, 'w') as f:\n f.write(_output_serializers[idx](_outputs[idx]))\n"
],
"image": "python:3.7-slim"
}
},
"exec-merge-materialized-splits": {
"container": {
"args": [
"--split-0",
"{{$.inputs.artifacts['split_0'].path}}",
"--split-1",
"{{$.inputs.artifacts['split_1'].path}}",
"--splits",
"{{$.outputs.artifacts['splits'].path}}"
],
"command": [
"sh",
"-ec",
"program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n",
"def _make_parent_dirs_and_return_path(file_path: str):\n import os\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n return file_path\n\ndef _merge_materialized_splits(\n split_0,\n split_1,\n splits,\n):\n \"\"\"Merge two materialized splits.\n\n Args:\n split_0: The first materialized split.\n split_1: The second materialized split.\n splits: The merged materialized split.\n \"\"\"\n with open(split_0, 'r') as f:\n split_0_content = f.read()\n with open(split_1, 'r') as f:\n split_1_content = f.read()\n with open(splits, 'w') as f:\n f.write(','.join([split_0_content, split_1_content]))\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Merge materialized splits', description='Merge two materialized splits.')\n_parser.add_argument(\"--split-0\", dest=\"split_0\", type=str, required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"--split-1\", dest=\"split_1\", type=str, required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"--splits\", dest=\"splits\", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS)\n_parsed_args = vars(_parser.parse_args())\n\n_outputs = _merge_materialized_splits(**_parsed_args)\n"
],
"image": "python:3.7-slim"
}
},
"exec-model-batch-explanation": {
"container": {
"args": [
"--type",
"BatchPredictionJob",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['job_display_name']}}\", \"input_config\": {\"instances_format\": \"{{$.inputs.parameters['instances_format']}}\", \"gcs_source\": {\"uris\":{{$.inputs.parameters['gcs_source_uris']}}}, \"bigquery_source\": {\"input_uri\": \"{{$.inputs.parameters['bigquery_source_input_uri']}}\"}}, \"model_parameters\": {{$.inputs.parameters['model_parameters']}}, \"output_config\": {\"predictions_format\": \"{{$.inputs.parameters['predictions_format']}}\", \"gcs_destination\": {\"output_uri_prefix\": \"{{$.inputs.parameters['gcs_destination_output_uri_prefix']}}\"}, \"bigquery_destination\": {\"output_uri\": \"{{$.inputs.parameters['bigquery_destination_output_uri']}}\"}}, \"dedicated_resources\": {\"machine_spec\": {\"machine_type\": \"{{$.inputs.parameters['machine_type']}}\", \"accelerator_type\": \"{{$.inputs.parameters['accelerator_type']}}\", \"accelerator_count\": {{$.inputs.parameters['accelerator_count']}}}, \"starting_replica_count\": {{$.inputs.parameters['starting_replica_count']}}, \"max_replica_count\": {{$.inputs.parameters['max_replica_count']}}}, \"manual_batch_tuning_parameters\": {\"batch_size\": {{$.inputs.parameters['manual_batch_tuning_parameters_batch_size']}}}, \"generate_explanation\": {{$.inputs.parameters['generate_explanation']}}, \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"explanation_metadata_artifact\": \"{{$.inputs.artifacts['explanation_metadata_artifact'].uri}}\", \"labels\": {{$.inputs.parameters['labels']}}, \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"launcher"
],
"image": "gcr.io/ml-pipeline/automl-tables-private:1.0.13"
}
},
"exec-model-batch-explanation-2": {
"container": {
"args": [
"--type",
"BatchPredictionJob",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['job_display_name']}}\", \"input_config\": {\"instances_format\": \"{{$.inputs.parameters['instances_format']}}\", \"gcs_source\": {\"uris\":{{$.inputs.parameters['gcs_source_uris']}}}, \"bigquery_source\": {\"input_uri\": \"{{$.inputs.parameters['bigquery_source_input_uri']}}\"}}, \"model_parameters\": {{$.inputs.parameters['model_parameters']}}, \"output_config\": {\"predictions_format\": \"{{$.inputs.parameters['predictions_format']}}\", \"gcs_destination\": {\"output_uri_prefix\": \"{{$.inputs.parameters['gcs_destination_output_uri_prefix']}}\"}, \"bigquery_destination\": {\"output_uri\": \"{{$.inputs.parameters['bigquery_destination_output_uri']}}\"}}, \"dedicated_resources\": {\"machine_spec\": {\"machine_type\": \"{{$.inputs.parameters['machine_type']}}\", \"accelerator_type\": \"{{$.inputs.parameters['accelerator_type']}}\", \"accelerator_count\": {{$.inputs.parameters['accelerator_count']}}}, \"starting_replica_count\": {{$.inputs.parameters['starting_replica_count']}}, \"max_replica_count\": {{$.inputs.parameters['max_replica_count']}}}, \"manual_batch_tuning_parameters\": {\"batch_size\": {{$.inputs.parameters['manual_batch_tuning_parameters_batch_size']}}}, \"generate_explanation\": {{$.inputs.parameters['generate_explanation']}}, \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"explanation_metadata_artifact\": \"{{$.inputs.artifacts['explanation_metadata_artifact'].uri}}\", \"labels\": {{$.inputs.parameters['labels']}}, \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"launcher"
],
"image": "gcr.io/ml-pipeline/automl-tables-private:1.0.13"
}
},
"exec-model-batch-predict": {
"container": {
"args": [
"--type",
"BatchPredictionJob",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['job_display_name']}}\", \"input_config\": {\"instances_format\": \"{{$.inputs.parameters['instances_format']}}\", \"gcs_source\": {\"uris\":{{$.inputs.parameters['gcs_source_uris']}}}, \"bigquery_source\": {\"input_uri\": \"{{$.inputs.parameters['bigquery_source_input_uri']}}\"}}, \"model_parameters\": {{$.inputs.parameters['model_parameters']}}, \"output_config\": {\"predictions_format\": \"{{$.inputs.parameters['predictions_format']}}\", \"gcs_destination\": {\"output_uri_prefix\": \"{{$.inputs.parameters['gcs_destination_output_uri_prefix']}}\"}, \"bigquery_destination\": {\"output_uri\": \"{{$.inputs.parameters['bigquery_destination_output_uri']}}\"}}, \"dedicated_resources\": {\"machine_spec\": {\"machine_type\": \"{{$.inputs.parameters['machine_type']}}\", \"accelerator_type\": \"{{$.inputs.parameters['accelerator_type']}}\", \"accelerator_count\": {{$.inputs.parameters['accelerator_count']}}}, \"starting_replica_count\": {{$.inputs.parameters['starting_replica_count']}}, \"max_replica_count\": {{$.inputs.parameters['max_replica_count']}}}, \"manual_batch_tuning_parameters\": {\"batch_size\": {{$.inputs.parameters['manual_batch_tuning_parameters_batch_size']}}}, \"generate_explanation\": {{$.inputs.parameters['generate_explanation']}}, \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"labels\": {{$.inputs.parameters['labels']}}, \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-model-batch-predict-2": {
"container": {
"args": [
"--type",
"BatchPredictionJob",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['job_display_name']}}\", \"model\": \"{{$.inputs.artifacts['model'].metadata['resourceName']}}\", \"input_config\": {\"instances_format\": \"{{$.inputs.parameters['instances_format']}}\", \"gcs_source\": {\"uris\":{{$.inputs.parameters['gcs_source_uris']}}}, \"bigquery_source\": {\"input_uri\": \"{{$.inputs.parameters['bigquery_source_input_uri']}}\"}}, \"model_parameters\": {{$.inputs.parameters['model_parameters']}}, \"output_config\": {\"predictions_format\": \"{{$.inputs.parameters['predictions_format']}}\", \"gcs_destination\": {\"output_uri_prefix\": \"{{$.inputs.parameters['gcs_destination_output_uri_prefix']}}\"}, \"bigquery_destination\": {\"output_uri\": \"{{$.inputs.parameters['bigquery_destination_output_uri']}}\"}}, \"dedicated_resources\": {\"machine_spec\": {\"machine_type\": \"{{$.inputs.parameters['machine_type']}}\", \"accelerator_type\": \"{{$.inputs.parameters['accelerator_type']}}\", \"accelerator_count\": {{$.inputs.parameters['accelerator_count']}}}, \"starting_replica_count\": {{$.inputs.parameters['starting_replica_count']}}, \"max_replica_count\": {{$.inputs.parameters['max_replica_count']}}}, \"manual_batch_tuning_parameters\": {\"batch_size\": {{$.inputs.parameters['manual_batch_tuning_parameters_batch_size']}}}, \"generate_explanation\": {{$.inputs.parameters['generate_explanation']}}, \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"labels\": {{$.inputs.parameters['labels']}}, \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-model-batch-predict-3": {
"container": {
"args": [
"--type",
"BatchPredictionJob",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['job_display_name']}}\", \"model\": \"{{$.inputs.artifacts['model'].metadata['resourceName']}}\", \"input_config\": {\"instances_format\": \"{{$.inputs.parameters['instances_format']}}\", \"gcs_source\": {\"uris\":{{$.inputs.parameters['gcs_source_uris']}}}, \"bigquery_source\": {\"input_uri\": \"{{$.inputs.parameters['bigquery_source_input_uri']}}\"}}, \"model_parameters\": {{$.inputs.parameters['model_parameters']}}, \"output_config\": {\"predictions_format\": \"{{$.inputs.parameters['predictions_format']}}\", \"gcs_destination\": {\"output_uri_prefix\": \"{{$.inputs.parameters['gcs_destination_output_uri_prefix']}}\"}, \"bigquery_destination\": {\"output_uri\": \"{{$.inputs.parameters['bigquery_destination_output_uri']}}\"}}, \"dedicated_resources\": {\"machine_spec\": {\"machine_type\": \"{{$.inputs.parameters['machine_type']}}\", \"accelerator_type\": \"{{$.inputs.parameters['accelerator_type']}}\", \"accelerator_count\": {{$.inputs.parameters['accelerator_count']}}}, \"starting_replica_count\": {{$.inputs.parameters['starting_replica_count']}}, \"max_replica_count\": {{$.inputs.parameters['max_replica_count']}}}, \"manual_batch_tuning_parameters\": {\"batch_size\": {{$.inputs.parameters['manual_batch_tuning_parameters_batch_size']}}}, \"generate_explanation\": {{$.inputs.parameters['generate_explanation']}}, \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"labels\": {{$.inputs.parameters['labels']}}, \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-model-batch-predict-4": {
"container": {
"args": [
"--type",
"BatchPredictionJob",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['job_display_name']}}\", \"input_config\": {\"instances_format\": \"{{$.inputs.parameters['instances_format']}}\", \"gcs_source\": {\"uris\":{{$.inputs.parameters['gcs_source_uris']}}}, \"bigquery_source\": {\"input_uri\": \"{{$.inputs.parameters['bigquery_source_input_uri']}}\"}}, \"model_parameters\": {{$.inputs.parameters['model_parameters']}}, \"output_config\": {\"predictions_format\": \"{{$.inputs.parameters['predictions_format']}}\", \"gcs_destination\": {\"output_uri_prefix\": \"{{$.inputs.parameters['gcs_destination_output_uri_prefix']}}\"}, \"bigquery_destination\": {\"output_uri\": \"{{$.inputs.parameters['bigquery_destination_output_uri']}}\"}}, \"dedicated_resources\": {\"machine_spec\": {\"machine_type\": \"{{$.inputs.parameters['machine_type']}}\", \"accelerator_type\": \"{{$.inputs.parameters['accelerator_type']}}\", \"accelerator_count\": {{$.inputs.parameters['accelerator_count']}}}, \"starting_replica_count\": {{$.inputs.parameters['starting_replica_count']}}, \"max_replica_count\": {{$.inputs.parameters['max_replica_count']}}}, \"manual_batch_tuning_parameters\": {\"batch_size\": {{$.inputs.parameters['manual_batch_tuning_parameters_batch_size']}}}, \"generate_explanation\": {{$.inputs.parameters['generate_explanation']}}, \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"labels\": {{$.inputs.parameters['labels']}}, \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-model-evaluation": {
"container": {
"args": [
"--setup_file",
"/setup.py",
"--json_mode",
"true",
"--project_id",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--problem_type",
"{{$.inputs.parameters['problem_type']}}",
"--batch_prediction_format",
"{{$.inputs.parameters['predictions_format']}}",
"--batch_prediction_gcs_source",
"{{$.inputs.artifacts['batch_prediction_job'].metadata['gcsOutputDirectory']}}",
"--ground_truth_format",
"{{$.inputs.parameters['ground_truth_format']}}",
"--ground_truth_gcs_source",
"{{$.inputs.parameters['ground_truth_gcs_source']}}",
"--key_prefix_in_prediction_dataset",
"instance",
"--key_columns",
"{{$.inputs.parameters['key_columns']}}",
"--root_dir",
"{{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"--classification_type",
"{{$.inputs.parameters['classification_type']}}",
"--class_names",
"{{$.inputs.parameters['class_names']}}",
"--ground_truth_column",
"instance.{{$.inputs.parameters['ground_truth_column']}}",
"--prediction_score_column",
"{{$.inputs.parameters['prediction_score_column']}}",
"--prediction_label_column",
"{{$.inputs.parameters['prediction_label_column']}}",
"--prediction_id_column",
"{{$.inputs.parameters['prediction_id_column']}}",
"--example_weight_column",
"{{$.inputs.parameters['example_weight_column']}}",
"--positive_classes",
"{{$.inputs.parameters['positive_classes']}}",
"--generate_feature_attribution",
"{{$.inputs.parameters['generate_feature_attribution']}}",
"--dataflow_job_prefix",
"evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"--dataflow_service_account",
"{{$.inputs.parameters['dataflow_service_account']}}",
"--dataflow_disk_size",
"{{$.inputs.parameters['dataflow_disk_size']}}",
"--dataflow_machine_type",
"{{$.inputs.parameters['dataflow_machine_type']}}",
"--dataflow_workers_num",
"{{$.inputs.parameters['dataflow_workers_num']}}",
"--dataflow_max_workers_num",
"{{$.inputs.parameters['dataflow_max_workers_num']}}",
"--dataflow_subnetwork",
"{{$.inputs.parameters['dataflow_subnetwork']}}",
"--dataflow_use_public_ips",
"{{$.inputs.parameters['dataflow_use_public_ips']}}",
"--kms_key_name",
"{{$.inputs.parameters['encryption_spec_key_name']}}",
"--output_metrics_gcs_path",
"{{$.outputs.artifacts['evaluation_metrics'].uri}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python",
"/main.py"
],
"image": "gcr.io/ml-pipeline/model-evaluation:v0.4"
}
},
"exec-model-evaluation-2": {
"container": {
"args": [
"--setup_file",
"/setup.py",
"--json_mode",
"true",
"--project_id",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--problem_type",
"{{$.inputs.parameters['problem_type']}}",
"--batch_prediction_format",
"{{$.inputs.parameters['predictions_format']}}",
"--batch_prediction_gcs_source",
"{{$.inputs.artifacts['batch_prediction_job'].metadata['gcsOutputDirectory']}}",
"--ground_truth_format",
"{{$.inputs.parameters['ground_truth_format']}}",
"--ground_truth_gcs_source",
"{{$.inputs.parameters['ground_truth_gcs_source']}}",
"--key_prefix_in_prediction_dataset",
"instance",
"--key_columns",
"{{$.inputs.parameters['key_columns']}}",
"--root_dir",
"{{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"--classification_type",
"{{$.inputs.parameters['classification_type']}}",
"--class_names",
"{{$.inputs.parameters['class_names']}}",
"--ground_truth_column",
"instance.{{$.inputs.parameters['ground_truth_column']}}",
"--prediction_score_column",
"{{$.inputs.parameters['prediction_score_column']}}",
"--prediction_label_column",
"{{$.inputs.parameters['prediction_label_column']}}",
"--prediction_id_column",
"{{$.inputs.parameters['prediction_id_column']}}",
"--example_weight_column",
"{{$.inputs.parameters['example_weight_column']}}",
"--positive_classes",
"{{$.inputs.parameters['positive_classes']}}",
"--generate_feature_attribution",
"{{$.inputs.parameters['generate_feature_attribution']}}",
"--dataflow_job_prefix",
"evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"--dataflow_service_account",
"{{$.inputs.parameters['dataflow_service_account']}}",
"--dataflow_disk_size",
"{{$.inputs.parameters['dataflow_disk_size']}}",
"--dataflow_machine_type",
"{{$.inputs.parameters['dataflow_machine_type']}}",
"--dataflow_workers_num",
"{{$.inputs.parameters['dataflow_workers_num']}}",
"--dataflow_max_workers_num",
"{{$.inputs.parameters['dataflow_max_workers_num']}}",
"--dataflow_subnetwork",
"{{$.inputs.parameters['dataflow_subnetwork']}}",
"--dataflow_use_public_ips",
"{{$.inputs.parameters['dataflow_use_public_ips']}}",
"--kms_key_name",
"{{$.inputs.parameters['encryption_spec_key_name']}}",
"--output_metrics_gcs_path",
"{{$.outputs.artifacts['evaluation_metrics'].uri}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python",
"/main.py"
],
"image": "gcr.io/ml-pipeline/model-evaluation:v0.4"
}
},
"exec-model-evaluation-3": {
"container": {
"args": [
"--setup_file",
"/setup.py",
"--json_mode",
"true",
"--project_id",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--problem_type",
"{{$.inputs.parameters['problem_type']}}",
"--batch_prediction_format",
"{{$.inputs.parameters['predictions_format']}}",
"--batch_prediction_gcs_source",
"{{$.inputs.artifacts['batch_prediction_job'].metadata['gcsOutputDirectory']}}",
"--ground_truth_format",
"{{$.inputs.parameters['ground_truth_format']}}",
"--ground_truth_gcs_source",
"{{$.inputs.parameters['ground_truth_gcs_source']}}",
"--key_prefix_in_prediction_dataset",
"instance",
"--key_columns",
"{{$.inputs.parameters['key_columns']}}",
"--root_dir",
"{{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"--classification_type",
"{{$.inputs.parameters['classification_type']}}",
"--class_names",
"{{$.inputs.parameters['class_names']}}",
"--ground_truth_column",
"instance.{{$.inputs.parameters['ground_truth_column']}}",
"--prediction_score_column",
"{{$.inputs.parameters['prediction_score_column']}}",
"--prediction_label_column",
"{{$.inputs.parameters['prediction_label_column']}}",
"--prediction_id_column",
"{{$.inputs.parameters['prediction_id_column']}}",
"--example_weight_column",
"{{$.inputs.parameters['example_weight_column']}}",
"--positive_classes",
"{{$.inputs.parameters['positive_classes']}}",
"--generate_feature_attribution",
"{{$.inputs.parameters['generate_feature_attribution']}}",
"--dataflow_job_prefix",
"evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"--dataflow_service_account",
"{{$.inputs.parameters['dataflow_service_account']}}",
"--dataflow_disk_size",
"{{$.inputs.parameters['dataflow_disk_size']}}",
"--dataflow_machine_type",
"{{$.inputs.parameters['dataflow_machine_type']}}",
"--dataflow_workers_num",
"{{$.inputs.parameters['dataflow_workers_num']}}",
"--dataflow_max_workers_num",
"{{$.inputs.parameters['dataflow_max_workers_num']}}",
"--dataflow_subnetwork",
"{{$.inputs.parameters['dataflow_subnetwork']}}",
"--dataflow_use_public_ips",
"{{$.inputs.parameters['dataflow_use_public_ips']}}",
"--kms_key_name",
"{{$.inputs.parameters['encryption_spec_key_name']}}",
"--output_metrics_gcs_path",
"{{$.outputs.artifacts['evaluation_metrics'].uri}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python",
"/main.py"
],
"image": "gcr.io/ml-pipeline/model-evaluation:v0.4"
}
},
"exec-model-evaluation-4": {
"container": {
"args": [
"--setup_file",
"/setup.py",
"--json_mode",
"true",
"--project_id",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--problem_type",
"{{$.inputs.parameters['problem_type']}}",
"--batch_prediction_format",
"{{$.inputs.parameters['predictions_format']}}",
"--batch_prediction_gcs_source",
"{{$.inputs.artifacts['batch_prediction_job'].metadata['gcsOutputDirectory']}}",
"--ground_truth_format",
"{{$.inputs.parameters['ground_truth_format']}}",
"--ground_truth_gcs_source",
"{{$.inputs.parameters['ground_truth_gcs_source']}}",
"--key_prefix_in_prediction_dataset",
"instance",
"--key_columns",
"{{$.inputs.parameters['key_columns']}}",
"--root_dir",
"{{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"--classification_type",
"{{$.inputs.parameters['classification_type']}}",
"--class_names",
"{{$.inputs.parameters['class_names']}}",
"--ground_truth_column",
"instance.{{$.inputs.parameters['ground_truth_column']}}",
"--prediction_score_column",
"{{$.inputs.parameters['prediction_score_column']}}",
"--prediction_label_column",
"{{$.inputs.parameters['prediction_label_column']}}",
"--prediction_id_column",
"{{$.inputs.parameters['prediction_id_column']}}",
"--example_weight_column",
"{{$.inputs.parameters['example_weight_column']}}",
"--positive_classes",
"{{$.inputs.parameters['positive_classes']}}",
"--generate_feature_attribution",
"{{$.inputs.parameters['generate_feature_attribution']}}",
"--dataflow_job_prefix",
"evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}",
"--dataflow_service_account",
"{{$.inputs.parameters['dataflow_service_account']}}",
"--dataflow_disk_size",
"{{$.inputs.parameters['dataflow_disk_size']}}",
"--dataflow_machine_type",
"{{$.inputs.parameters['dataflow_machine_type']}}",
"--dataflow_workers_num",
"{{$.inputs.parameters['dataflow_workers_num']}}",
"--dataflow_max_workers_num",
"{{$.inputs.parameters['dataflow_max_workers_num']}}",
"--dataflow_subnetwork",
"{{$.inputs.parameters['dataflow_subnetwork']}}",
"--dataflow_use_public_ips",
"{{$.inputs.parameters['dataflow_use_public_ips']}}",
"--kms_key_name",
"{{$.inputs.parameters['encryption_spec_key_name']}}",
"--output_metrics_gcs_path",
"{{$.outputs.artifacts['evaluation_metrics'].uri}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python",
"/main.py"
],
"image": "gcr.io/ml-pipeline/model-evaluation:v0.4"
}
},
"exec-model-evaluation-import": {
"container": {
"args": [
"--metrics",
"{{$.inputs.artifacts['metrics'].uri}}",
"--metrics_explanation",
"{{$.inputs.artifacts['metrics'].metadata['explanation_gcs_path']}}",
"--explanation",
"{{$.inputs.artifacts['explanation'].metadata['explanation_gcs_path']}}",
"--problem_type",
"{{$.inputs.parameters['problem_type']}}",
"--display_name",
"{{$.inputs.parameters['display_name']}}",
"--dataset_path",
"{{$.inputs.parameters['dataset_path']}}",
"--dataset_paths",
"{{$.inputs.parameters['dataset_paths']}}",
"--dataset_type",
"{{$.inputs.parameters['dataset_type']}}",
"--pipeline_job_id",
"{{$.pipeline_job_uuid}}",
"--pipeline_job_resource_name",
"{{$.pipeline_job_resource_name}}",
"--model_name",
"{{$.inputs.artifacts['model'].metadata['resourceName']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.experimental.evaluation.import_model_evaluation"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-model-evaluation-import-2": {
"container": {
"args": [
"--metrics",
"{{$.inputs.artifacts['metrics'].uri}}",
"--metrics_explanation",
"{{$.inputs.artifacts['metrics'].metadata['explanation_gcs_path']}}",
"--explanation",
"{{$.inputs.artifacts['explanation'].metadata['explanation_gcs_path']}}",
"--problem_type",
"{{$.inputs.parameters['problem_type']}}",
"--display_name",
"{{$.inputs.parameters['display_name']}}",
"--dataset_path",
"{{$.inputs.parameters['dataset_path']}}",
"--dataset_paths",
"{{$.inputs.parameters['dataset_paths']}}",
"--dataset_type",
"{{$.inputs.parameters['dataset_type']}}",
"--pipeline_job_id",
"{{$.pipeline_job_uuid}}",
"--pipeline_job_resource_name",
"{{$.pipeline_job_resource_name}}",
"--model_name",
"{{$.inputs.artifacts['model'].metadata['resourceName']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.experimental.evaluation.import_model_evaluation"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-model-upload": {
"container": {
"args": [
"--type",
"UploadModel",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['display_name']}}\", \"description\": \"{{$.inputs.parameters['description']}}\", \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"explanation_metadata_artifact\": \"{{$.inputs.artifacts['explanation_metadata_artifact'].uri}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"labels\": {{$.inputs.parameters['labels']}}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"launcher"
],
"image": "gcr.io/ml-pipeline/automl-tables-private:1.0.13"
}
},
"exec-model-upload-2": {
"container": {
"args": [
"--type",
"UploadModel",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['display_name']}}\", \"description\": \"{{$.inputs.parameters['description']}}\", \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"explanation_metadata_artifact\": \"{{$.inputs.artifacts['explanation_metadata_artifact'].uri}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"labels\": {{$.inputs.parameters['labels']}}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"launcher"
],
"image": "gcr.io/ml-pipeline/automl-tables-private:1.0.13"
}
},
"exec-model-upload-3": {
"container": {
"args": [
"--type",
"UploadModel",
"--payload",
"{\"display_name\": \"{{$.inputs.parameters['display_name']}}\", \"description\": \"{{$.inputs.parameters['description']}}\", \"explanation_spec\": {\"parameters\": {{$.inputs.parameters['explanation_parameters']}}, \"metadata\": {{$.inputs.parameters['explanation_metadata']}}}, \"explanation_metadata_artifact\": \"{{$.inputs.artifacts['explanation_metadata_artifact'].uri}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"labels\": {{$.inputs.parameters['labels']}}}",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--executor_input",
"{{$}}"
],
"command": [
"python3",
"-u",
"-m",
"launcher"
],
"image": "gcr.io/ml-pipeline/automl-tables-private:1.0.13"
}
},
"exec-read-input-uri": {
"container": {
"args": [
"--split-uri",
"{{$.inputs.artifacts['split_uri'].path}}",
"----output-paths",
"{{$.outputs.parameters['Output'].output_file}}"
],
"command": [
"sh",
"-ec",
"program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n",
"def _read_input_uri(split_uri):\n \"\"\"Construct Dataset based on the batch prediction job.\n\n Args:\n split_uri: Tbe path to the file that contains Dataset data.\n\n Returns:\n The list of string that represents the batch prediction input files.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n with open(split_uri, 'r') as f:\n data_source = json.loads(f.read())\n return data_source['tf_record_data_source']['file_patterns']\n\ndef _serialize_json(obj) -> str:\n if isinstance(obj, str):\n return obj\n import json\n\n def default_serializer(obj):\n if hasattr(obj, 'to_struct'):\n return obj.to_struct()\n else:\n raise TypeError(\n \"Object of type '%s' is not JSON serializable and does not have .to_struct() method.\"\n % obj.__class__.__name__)\n\n return json.dumps(obj, default=default_serializer, sort_keys=True)\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Read input uri', description='Construct Dataset based on the batch prediction job.')\n_parser.add_argument(\"--split-uri\", dest=\"split_uri\", type=str, required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"----output-paths\", dest=\"_output_paths\", type=str, nargs=1)\n_parsed_args = vars(_parser.parse_args())\n_output_files = _parsed_args.pop(\"_output_paths\", [])\n\n_outputs = _read_input_uri(**_parsed_args)\n\n_outputs = [_outputs]\n\n_output_serializers = [\n _serialize_json,\n\n]\n\nimport os\nfor idx, output_file in enumerate(_output_files):\n try:\n os.makedirs(os.path.dirname(output_file))\n except OSError:\n pass\n with open(output_file, 'w') as f:\n f.write(_output_serializers[idx](_outputs[idx]))\n"
],
"image": "python:3.7-slim"
}
},
"exec-read-input-uri-2": {
"container": {
"args": [
"--split-uri",
"{{$.inputs.artifacts['split_uri'].path}}",
"----output-paths",
"{{$.outputs.parameters['Output'].output_file}}"
],
"command": [
"sh",
"-ec",
"program_path=$(mktemp)\nprintf \"%s\" \"$0\" > \"$program_path\"\npython3 -u \"$program_path\" \"$@\"\n",
"def _read_input_uri(split_uri):\n \"\"\"Construct Dataset based on the batch prediction job.\n\n Args:\n split_uri: Tbe path to the file that contains Dataset data.\n\n Returns:\n The list of string that represents the batch prediction input files.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n with open(split_uri, 'r') as f:\n data_source = json.loads(f.read())\n return data_source['tf_record_data_source']['file_patterns']\n\ndef _serialize_json(obj) -> str:\n if isinstance(obj, str):\n return obj\n import json\n\n def default_serializer(obj):\n if hasattr(obj, 'to_struct'):\n return obj.to_struct()\n else:\n raise TypeError(\n \"Object of type '%s' is not JSON serializable and does not have .to_struct() method.\"\n % obj.__class__.__name__)\n\n return json.dumps(obj, default=default_serializer, sort_keys=True)\n\nimport argparse\n_parser = argparse.ArgumentParser(prog='Read input uri', description='Construct Dataset based on the batch prediction job.')\n_parser.add_argument(\"--split-uri\", dest=\"split_uri\", type=str, required=True, default=argparse.SUPPRESS)\n_parser.add_argument(\"----output-paths\", dest=\"_output_paths\", type=str, nargs=1)\n_parsed_args = vars(_parser.parse_args())\n_output_files = _parsed_args.pop(\"_output_paths\", [])\n\n_outputs = _read_input_uri(**_parsed_args)\n\n_outputs = [_outputs]\n\n_output_serializers = [\n _serialize_json,\n\n]\n\nimport os\nfor idx, output_file in enumerate(_output_files):\n try:\n os.makedirs(os.path.dirname(output_file))\n except OSError:\n pass\n with open(output_file, 'w') as f:\n f.write(_output_serializers[idx](_outputs[idx]))\n"
],
"image": "python:3.7-slim"
}
},
"exec-set-model-can-skip-validation": {
"container": {
"args": [
"--executor_input",
"{{$}}",
"--function_to_execute",
"_set_model_can_skip_validation"
],
"command": [
"sh",
"-ec",
"program_path=$(mktemp -d)\nprintf \"%s\" \"$0\" > \"$program_path/ephemeral_component.py\"\npython3 -m kfp.v2.components.executor_main --component_module_path \"$program_path/ephemeral_component.py\" \"$@\"\n",
"\nimport kfp\nfrom kfp.v2 import dsl\nfrom kfp.v2.dsl import *\nfrom typing import *\n\ndef _set_model_can_skip_validation(model: Input[Artifact]):\n \"\"\"Construct Dataset based on the batch prediction job.\n\n Args:\n model: The model artifact.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n import os\n import tensorflow as tf\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\n # create an empty CAN_SKIP_VALIDATION file\n with tf.io.gfile.GFile(os.path.join(model.uri, 'CAN_SKIP_VALIDATION'),\n 'w') as f:\n f.write('')\n\n"
],
"image": "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:prod"
}
},
"exec-tabular-stats-and-example-gen": {
"container": {
"args": [
"--type",
"CustomJob",
"--project",
"{{$.inputs.parameters['project']}}",
"--location",
"{{$.inputs.parameters['location']}}",
"--gcp_resources",
"{{$.outputs.parameters['gcp_resources'].output_file}}",
"--payload",
"{\"display_name\": \"tabular-stats-and-example-gen-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"encryption_spec\": {\"kms_key_name\":\"{{$.inputs.parameters['encryption_spec_key_name']}}\"}, \"job_spec\": {\"worker_pool_specs\": [{\"replica_count\": 1, \"machine_spec\": {\"machine_type\": \"n1-standard-8\"}, \"container_spec\": {\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:prod\", \"args\": [\"stats_generator\",\"--train_spec={\\\"prediction_type\\\": \\\"{{$.inputs.parameters['prediction_type']}}\\\", \\\"target_column\\\": \\\"{{$.inputs.parameters['target_column_name']}}\\\", \\\"optimization_objective\\\": \\\"{{$.inputs.parameters['optimization_objective']}}\\\", \\\"weight_column_name\\\": \\\"{{$.inputs.parameters['weight_column_name']}}\\\", \\\"transformations\\\": {{$.inputs.parameters['transformations']}}}\", \"--transformations_override_path={{$.inputs.parameters['transformations_path']}}\", \"--split_spec={{$.inputs.parameters['split_spec']}}\", \"--data_source={{$.inputs.parameters['data_source']}}\", \"--data_source_csv_filenames={{$.inputs.parameters['data_source_csv_filenames']}}\", \"--data_source_bigquery_table_path={{$.inputs.parameters['data_source_bigquery_table_path']}}\", \"--predefined_split_key={{$.inputs.parameters['predefined_split_key']}}\", \"--timestamp_split_key={{$.inputs.parameters['timestamp_split_key']}}\", \"--stratified_split_key={{$.inputs.parameters['stratified_split_key']}}\", \"--training_fraction={{$.inputs.parameters['training_fraction']}}\", \"--validation_fraction={{$.inputs.parameters['validation_fraction']}}\", \"--test_fraction={{$.inputs.parameters['test_fraction']}}\", \"--target_column={{$.inputs.parameters['target_column_name']}}\", \"--request_type={{$.inputs.parameters['request_type']}}\", \"--optimization_objective_recall_value={{$.inputs.parameters['optimization_objective_recall_value']}}\", \"--optimization_objective_precision_value={{$.inputs.parameters['optimization_objective_precision_value']}}\", \"--example_gen_gcs_output_prefix={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/example_gen_output\", \"--dataset_stats_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/stats/\", \"--stats_result_path={{$.outputs.artifacts['dataset_stats'].uri}}\", \"--dataset_schema_path={{$.outputs.artifacts['dataset_schema'].uri}}\", \"--job_name=tabular-stats-and-example-gen-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\", \"--dataflow_project={{$.inputs.parameters['project']}}\", \"--error_file_path={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.pb\", \"--dataflow_staging_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging\", \"--dataflow_tmp_dir={{$.inputs.parameters['root_dir']}}/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\", \"--dataflow_max_num_workers={{$.inputs.parameters['dataflow_max_num_workers']}}\", \"--dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:prod\", \"--dataflow_machine_type={{$.inputs.parameters['dataflow_machine_type']}}\", \"--dataflow_disk_size_gb={{$.inputs.parameters['dataflow_disk_size_gb']}}\", \"--dataflow_kms_key={{$.inputs.parameters['encryption_spec_key_name']}}\", \"--dataflow_subnetwork_fully_qualified={{$.inputs.parameters['dataflow_subnetwork']}}\", \"--dataflow_use_public_ips={{$.inputs.parameters['dataflow_use_public_ips']}}\", 
\"--dataflow_service_account={{$.inputs.parameters['dataflow_service_account']}}\", \"--is_distill={{$.inputs.parameters['run_distillation']}}\", \"--additional_experiments={{$.inputs.parameters['additional_experiments']}}\", \"--metadata_path={{$.outputs.artifacts['metadata'].uri}}\", \"--train_split={{$.outputs.artifacts['train_split'].uri}}\", \"--eval_split={{$.outputs.artifacts['eval_split'].uri}}\", \"--test_split={{$.outputs.artifacts['test_split'].uri}}\", \"--test_split_for_batch_prediction_component={{$.outputs.parameters['test_split_json'].output_file}}\", \"--downsampled_test_split_for_batch_prediction_component={{$.outputs.parameters['downsampled_test_split_json'].output_file}}\", \"--instance_baseline_path={{$.outputs.artifacts['instance_baseline'].uri}}\", \"--parse_json=true\", \"--generate_additional_downsample_test_split=true\", \"--executor_input={{$.json_escape[1]}}\"]}}]}}"
],
"command": [
"python3",
"-u",
"-m",
"google_cloud_pipeline_components.container.v1.gcp_launcher.launcher"
],
"image": "gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.21"
}
},
"exec-write-bp-result-path": {
"container": {
"args": [
"--executor_input",
"{{$}}",
"--function_to_execute",
"_write_bp_result_path"
],
"command": [
"sh",
"-ec",
"program_path=$(mktemp -d)\nprintf \"%s\" \"$0\" > \"$program_path/ephemeral_component.py\"\npython3 -m kfp.v2.components.executor_main --component_module_path \"$program_path/ephemeral_component.py\" \"$@\"\n",
"\nimport kfp\nfrom kfp.v2 import dsl\nfrom kfp.v2.dsl import *\nfrom typing import *\n\ndef _write_bp_result_path(\n bp_job: Input[Artifact],\n result: OutputPath('Dataset'),\n):\n \"\"\"Construct Dataset based on the batch prediction job.\n\n Args:\n bp_job: The batch prediction job artifact.\n result: Tbe path to the file that contains Dataset data.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n directory = bp_job.metadata['gcsOutputDirectory']\n data_source = {\n 'tf_record_data_source': {\n 'file_patterns': [f'{directory}/prediction.results-*',],\n 'coder': 'PROTO_VALUE',\n },\n }\n with open(result, 'w') as f:\n f.write(json.dumps(data_source))\n\n"
],
"image": "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:prod"
}
},
"exec-write-bp-result-path-2": {
"container": {
"args": [
"--executor_input",
"{{$}}",
"--function_to_execute",
"_write_bp_result_path"
],
"command": [
"sh",
"-ec",
"program_path=$(mktemp -d)\nprintf \"%s\" \"$0\" > \"$program_path/ephemeral_component.py\"\npython3 -m kfp.v2.components.executor_main --component_module_path \"$program_path/ephemeral_component.py\" \"$@\"\n",
"\nimport kfp\nfrom kfp.v2 import dsl\nfrom kfp.v2.dsl import *\nfrom typing import *\n\ndef _write_bp_result_path(\n bp_job: Input[Artifact],\n result: OutputPath('Dataset'),\n):\n \"\"\"Construct Dataset based on the batch prediction job.\n\n Args:\n bp_job: The batch prediction job artifact.\n result: Tbe path to the file that contains Dataset data.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n directory = bp_job.metadata['gcsOutputDirectory']\n data_source = {\n 'tf_record_data_source': {\n 'file_patterns': [f'{directory}/prediction.results-*',],\n 'coder': 'PROTO_VALUE',\n },\n }\n with open(result, 'w') as f:\n f.write(json.dumps(data_source))\n\n"
],
"image": "us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:prod"
}
}
}
},
"pipelineInfo": {
"name": "automl-tabular-deprecated"
},
"root": {
"dag": {
"outputs": {
"artifacts": {
"model-evaluation-2-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-2-evaluation_metrics",
"producerSubtask": "exit-handler-1"
}
]
},
"model-evaluation-3-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-3-evaluation_metrics",
"producerSubtask": "exit-handler-1"
}
]
},
"model-evaluation-4-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-4-evaluation_metrics",
"producerSubtask": "exit-handler-1"
}
]
},
"model-evaluation-evaluation_metrics": {
"artifactSelectors": [
{
"outputArtifactKey": "model-evaluation-evaluation_metrics",
"producerSubtask": "exit-handler-1"
}
]
}
}
},
"tasks": {
"automl-tabular-finalizer": {
"componentRef": {
"name": "comp-automl-tabular-finalizer"
},
"dependentTasks": [
"exit-handler-1"
],
"inputs": {
"parameters": {
"encryption_spec_key_name": {
"runtimeValue": {
"constantValue": {
"stringValue": ""
}
}
},
"location": {
"componentInputParameter": "location"
},
"project": {
"componentInputParameter": "project"
},
"root_dir": {
"componentInputParameter": "root_dir"
}
}
},
"taskInfo": {
"name": "automl-tabular-finalizer"
},
"triggerPolicy": {
"strategy": "ALL_UPSTREAM_TASKS_COMPLETED"
}
},
"exit-handler-1": {
"componentRef": {
"name": "comp-exit-handler-1"
},
"inputs": {
"parameters": {
"pipelineparam--additional_experiments": {
"componentInputParameter": "additional_experiments"
},
"pipelineparam--cv_trainer_worker_pool_specs_override": {
"componentInputParameter": "cv_trainer_worker_pool_specs_override"
},
"pipelineparam--data_source": {
"componentInputParameter": "data_source"
},
"pipelineparam--dataflow_service_account": {
"componentInputParameter": "dataflow_service_account"
},
"pipelineparam--dataflow_subnetwork": {
"componentInputParameter": "dataflow_subnetwork"
},
"pipelineparam--dataflow_use_public_ips": {
"componentInputParameter": "dataflow_use_public_ips"
},
"pipelineparam--disable_early_stopping": {
"componentInputParameter": "disable_early_stopping"
},
"pipelineparam--distill_batch_predict_machine_type": {
"componentInputParameter": "distill_batch_predict_machine_type"
},
"pipelineparam--distill_batch_predict_max_replica_count": {
"componentInputParameter": "distill_batch_predict_max_replica_count"
},
"pipelineparam--distill_batch_predict_starting_replica_count": {
"componentInputParameter": "distill_batch_predict_starting_replica_count"
},
"pipelineparam--distill_stage_1_deadline_hours": {
"componentInputParameter": "distill_stage_1_deadline_hours"
},
"pipelineparam--encryption_spec_key_name": {
"componentInputParameter": "encryption_spec_key_name"
},
"pipelineparam--evaluation_batch_predict_machine_type": {
"componentInputParameter": "evaluation_batch_predict_machine_type"
},
"pipelineparam--evaluation_batch_predict_max_replica_count": {
"componentInputParameter": "evaluation_batch_predict_max_replica_count"
},
"pipelineparam--evaluation_batch_predict_starting_replica_count": {
"componentInputParameter": "evaluation_batch_predict_starting_replica_count"
},
"pipelineparam--evaluation_dataflow_disk_size_gb": {
"componentInputParameter": "evaluation_dataflow_disk_size_gb"
},
"pipelineparam--evaluation_dataflow_machine_type": {
"componentInputParameter": "evaluation_dataflow_machine_type"
},
"pipelineparam--evaluation_dataflow_max_num_workers": {
"componentInputParameter": "evaluation_dataflow_max_num_workers"
},
"pipelineparam--export_additional_model_without_custom_ops": {
"componentInputParameter": "export_additional_model_without_custom_ops"
},
"pipelineparam--location": {
"componentInputParameter": "location"
},
"pipelineparam--optimization_objective": {
"componentInputParameter": "optimization_objective"
},
"pipelineparam--optimization_objective_precision_value": {
"componentInputParameter": "optimization_objective_precision_value"
},
"pipelineparam--optimization_objective_recall_value": {
"componentInputParameter": "optimization_objective_recall_value"
},
"pipelineparam--prediction_type": {
"componentInputParameter": "prediction_type"
},
"pipelineparam--project": {
"componentInputParameter": "project"
},
"pipelineparam--reduce_search_space_mode": {
"componentInputParameter": "reduce_search_space_mode"
},
"pipelineparam--root_dir": {
"componentInputParameter": "root_dir"
},
"pipelineparam--run_distillation": {
"componentInputParameter": "run_distillation"
},
"pipelineparam--run_evaluation": {
"componentInputParameter": "run_evaluation"
},
"pipelineparam--split_spec": {
"componentInputParameter": "split_spec"
},
"pipelineparam--stage_1_deadline_hours": {
"componentInputParameter": "stage_1_deadline_hours"
},
"pipelineparam--stage_1_num_parallel_trials": {
"componentInputParameter": "stage_1_num_parallel_trials"
},
"pipelineparam--stage_1_num_selected_trials": {
"componentInputParameter": "stage_1_num_selected_trials"
},
"pipelineparam--stage_1_single_run_max_secs": {
"componentInputParameter": "stage_1_single_run_max_secs"
},
"pipelineparam--stage_1_tuner_worker_pool_specs_override": {
"componentInputParameter": "stage_1_tuner_worker_pool_specs_override"
},
"pipelineparam--stage_2_deadline_hours": {
"componentInputParameter": "stage_2_deadline_hours"
},
"pipelineparam--stage_2_num_parallel_trials": {
"componentInputParameter": "stage_2_num_parallel_trials"
},
"pipelineparam--stage_2_num_selected_trials": {
"componentInputParameter": "stage_2_num_selected_trials"
},
"pipelineparam--stage_2_single_run_max_secs": {
"componentInputParameter": "stage_2_single_run_max_secs"
},
"pipelineparam--stats_and_example_gen_dataflow_disk_size_gb": {
"componentInputParameter": "stats_and_example_gen_dataflow_disk_size_gb"
},
"pipelineparam--stats_and_example_gen_dataflow_machine_type": {
"componentInputParameter": "stats_and_example_gen_dataflow_machine_type"
},
"pipelineparam--stats_and_example_gen_dataflow_max_num_workers": {
"componentInputParameter": "stats_and_example_gen_dataflow_max_num_workers"
},
"pipelineparam--study_spec_override": {
"componentInputParameter": "study_spec_override"
},
"pipelineparam--target_column_name": {
"componentInputParameter": "target_column_name"
},
"pipelineparam--transform_dataflow_disk_size_gb": {
"componentInputParameter": "transform_dataflow_disk_size_gb"
},
"pipelineparam--transform_dataflow_machine_type": {
"componentInputParameter": "transform_dataflow_machine_type"
},
"pipelineparam--transform_dataflow_max_num_workers": {
"componentInputParameter": "transform_dataflow_max_num_workers"
},
"pipelineparam--transformations": {
"componentInputParameter": "transformations"
},
"pipelineparam--weight_column_name": {
"componentInputParameter": "weight_column_name"
}
}
},
"taskInfo": {
"name": "exit-handler-1"
}
}
}
},
"inputDefinitions": {
"parameters": {
"additional_experiments": {
"type": "STRING"
},
"cv_trainer_worker_pool_specs_override": {
"type": "STRING"
},
"data_source": {
"type": "STRING"
},
"dataflow_service_account": {
"type": "STRING"
},
"dataflow_subnetwork": {
"type": "STRING"
},
"dataflow_use_public_ips": {
"type": "STRING"
},
"disable_early_stopping": {
"type": "STRING"
},
"distill_batch_predict_machine_type": {
"type": "STRING"
},
"distill_batch_predict_max_replica_count": {
"type": "INT"
},
"distill_batch_predict_starting_replica_count": {
"type": "INT"
},
"distill_stage_1_deadline_hours": {
"type": "DOUBLE"
},
"encryption_spec_key_name": {
"type": "STRING"
},
"evaluation_batch_predict_machine_type": {
"type": "STRING"
},
"evaluation_batch_predict_max_replica_count": {
"type": "INT"
},
"evaluation_batch_predict_starting_replica_count": {
"type": "INT"
},
"evaluation_dataflow_disk_size_gb": {
"type": "INT"
},
"evaluation_dataflow_machine_type": {
"type": "STRING"
},
"evaluation_dataflow_max_num_workers": {
"type": "INT"
},
"export_additional_model_without_custom_ops": {
"type": "STRING"
},
"location": {
"type": "STRING"
},
"optimization_objective": {
"type": "STRING"
},
"optimization_objective_precision_value": {
"type": "DOUBLE"
},
"optimization_objective_recall_value": {
"type": "DOUBLE"
},
"prediction_type": {
"type": "STRING"
},
"project": {
"type": "STRING"
},
"reduce_search_space_mode": {
"type": "STRING"
},
"root_dir": {
"type": "STRING"
},
"run_distillation": {
"type": "STRING"
},
"run_evaluation": {
"type": "STRING"
},
"split_spec": {
"type": "STRING"
},
"stage_1_deadline_hours": {
"type": "DOUBLE"
},
"stage_1_num_parallel_trials": {
"type": "INT"
},
"stage_1_num_selected_trials": {
"type": "INT"
},
"stage_1_single_run_max_secs": {
"type": "INT"
},
"stage_1_tuner_worker_pool_specs_override": {
"type": "STRING"
},
"stage_2_deadline_hours": {
"type": "DOUBLE"
},
"stage_2_num_parallel_trials": {
"type": "INT"
},
"stage_2_num_selected_trials": {
"type": "INT"
},
"stage_2_single_run_max_secs": {
"type": "INT"
},
"stats_and_example_gen_dataflow_disk_size_gb": {
"type": "INT"
},
"stats_and_example_gen_dataflow_machine_type": {
"type": "STRING"
},
"stats_and_example_gen_dataflow_max_num_workers": {
"type": "INT"
},
"study_spec_override": {
"type": "STRING"
},
"target_column_name": {
"type": "STRING"
},
"transform_dataflow_disk_size_gb": {
"type": "INT"
},
"transform_dataflow_machine_type": {
"type": "STRING"
},
"transform_dataflow_max_num_workers": {
"type": "INT"
},
"transformations": {
"type": "STRING"
},
"weight_column_name": {
"type": "STRING"
}
}
},
"outputDefinitions": {
"artifacts": {
"model-evaluation-2-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-3-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-4-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
},
"model-evaluation-evaluation_metrics": {
"artifactType": {
"schemaTitle": "system.Metrics",
"schemaVersion": "0.0.1"
}
}
}
}
},
"schemaVersion": "2.0.0",
"sdkVersion": "kfp-1.8.11"
},
"runtimeConfig": {
"parameters": {
"additional_experiments": {
"stringValue": ""
},
"cv_trainer_worker_pool_specs_override": {
"stringValue": ""
},
"dataflow_service_account": {
"stringValue": ""
},
"dataflow_subnetwork": {
"stringValue": ""
},
"dataflow_use_public_ips": {
"stringValue": "True"
},
"disable_early_stopping": {
"stringValue": "False"
},
"distill_batch_predict_machine_type": {
"stringValue": "n1-standard-16"
},
"distill_batch_predict_max_replica_count": {
"intValue": "25"
},
"distill_batch_predict_starting_replica_count": {
"intValue": "25"
},
"distill_stage_1_deadline_hours": {
"doubleValue": 1.0
},
"encryption_spec_key_name": {
"stringValue": ""
},
"evaluation_batch_predict_machine_type": {
"stringValue": "n1-standard-16"
},
"evaluation_batch_predict_max_replica_count": {
"intValue": "25"
},
"evaluation_batch_predict_starting_replica_count": {
"intValue": "25"
},
"evaluation_dataflow_disk_size_gb": {
"intValue": "50"
},
"evaluation_dataflow_machine_type": {
"stringValue": "n1-standard-4"
},
"evaluation_dataflow_max_num_workers": {
"intValue": "25"
},
"export_additional_model_without_custom_ops": {
"stringValue": "False"
},
"optimization_objective_precision_value": {
"doubleValue": -1.0
},
"optimization_objective_recall_value": {
"doubleValue": -1.0
},
"reduce_search_space_mode": {
"stringValue": "regular"
},
"run_distillation": {
"stringValue": "False"
},
"run_evaluation": {
"stringValue": "False"
},
"stage_1_tuner_worker_pool_specs_override": {
"stringValue": ""
},
"stats_and_example_gen_dataflow_disk_size_gb": {
"intValue": "40"
},
"stats_and_example_gen_dataflow_machine_type": {
"stringValue": "n1-standard-16"
},
"stats_and_example_gen_dataflow_max_num_workers": {
"intValue": "25"
},
"study_spec_override": {
"stringValue": ""
},
"transform_dataflow_disk_size_gb": {
"intValue": "40"
},
"transform_dataflow_machine_type": {
"stringValue": "n1-standard-16"
},
"transform_dataflow_max_num_workers": {
"intValue": "25"
},
"weight_column_name": {
"stringValue": ""
}
}
}
} | 821 |
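The JSON above is the compiled spec of the deprecated AutoML Tables pipeline ("automl-tabular-deprecated"); such a spec is normally produced by the KFP compiler and then submitted to Vertex AI Pipelines. The following is a minimal sketch of that submission step, not part of the repository: it assumes the spec has been saved to a local file, and the project, bucket, and parameter values are placeholder assumptions (only a subset of the required inputDefinitions is shown).

# Hypothetical submission sketch; all resource names and parameter values are placeholders.
from google.cloud import aiplatform

aiplatform.init(
    project="my-project",                  # placeholder project
    location="us-central1",
    staging_bucket="gs://my-bucket",       # placeholder bucket
)

job = aiplatform.PipelineJob(
    display_name="automl-tabular-deprecated-run",
    template_path="automl_tabular_deprecated_pipeline.json",  # local copy of the spec above
    pipeline_root="gs://my-bucket/pipeline_root",
    parameter_values={
        # Only a few parameters are shown; the full required set is listed in the
        # spec's inputDefinitions block above.
        "project": "my-project",
        "location": "us-central1",
        "root_dir": "gs://my-bucket/pipeline_root",
        "target_column_name": "label",     # placeholder column name
        "prediction_type": "classification",
        "optimization_objective": "maximize-au-roc",
    },
)
job.submit()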
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# fmt: off
"""Create [Vertex AI AutoML training jobs](https://cloud.google.com/vertex-ai/docs/beginner/beginners-guide) for image, text, video, and forecasting."""
# fmt: on
from google_cloud_pipeline_components.v1.automl.training_job.automl_forecasting_training_job.component import automl_forecasting_training_job as AutoMLForecastingTrainingJobRunOp
from google_cloud_pipeline_components.v1.automl.training_job.automl_image_training_job.component import automl_image_training_job as AutoMLImageTrainingJobRunOp
from google_cloud_pipeline_components.v1.automl.training_job.automl_tabular_training_job.component import automl_tabular_training_job as AutoMLTabularTrainingJobRunOp
from google_cloud_pipeline_components.v1.automl.training_job.automl_text_training_job.component import automl_text_training_job as AutoMLTextTrainingJobRunOp
from google_cloud_pipeline_components.v1.automl.training_job.automl_video_training_job.component import automl_video_training_job as AutoMLVideoTrainingJobRunOp
__all__ = [
'AutoMLImageTrainingJobRunOp',
'AutoMLTextTrainingJobRunOp',
'AutoMLTabularTrainingJobRunOp',
'AutoMLForecastingTrainingJobRunOp',
'AutoMLVideoTrainingJobRunOp',
]
| 822 |
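The __init__.py above re-exports the AutoML training-job components under their historical *RunOp names. Below is a hypothetical sketch, not taken from the repository, of wiring one of these components into a KFP v2 pipeline; the dataset display name, GCS source URI, and target column are placeholder assumptions, and compiling or submitting the pipeline is omitted.

# Hypothetical usage sketch; display names, URIs, and column names are placeholders.
from kfp import dsl
from google_cloud_pipeline_components.v1.automl.training_job import (
    AutoMLTabularTrainingJobRunOp,
)
from google_cloud_pipeline_components.v1.dataset import TabularDatasetCreateOp


@dsl.pipeline(name="automl-tabular-training-sketch")
def training_pipeline(project: str, location: str = "us-central1"):
    # Create a Vertex AI tabular dataset from a CSV in GCS (placeholder URI).
    dataset_op = TabularDatasetCreateOp(
        project=project,
        location=location,
        display_name="my-tabular-dataset",
        gcs_source="gs://my-bucket/data/train.csv",
    )
    # Train an AutoML tabular model on the dataset (placeholder names).
    AutoMLTabularTrainingJobRunOp(
        project=project,
        location=location,
        display_name="my-automl-training-job",
        optimization_prediction_type="classification",
        dataset=dataset_op.outputs["dataset"],
        target_column="label",
    )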
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_forecasting_training_job/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components.types.artifact_types import VertexDataset
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import Input
from kfp.dsl import Output
@dsl.container_component
def automl_forecasting_training_job(
project: str,
display_name: str,
target_column: str,
time_column: str,
time_series_identifier_column: str,
unavailable_at_forecast_columns: list,
available_at_forecast_columns: list,
forecast_horizon: int,
data_granularity_unit: str,
data_granularity_count: int,
dataset: Input[VertexDataset],
model: Output[VertexModel],
location: Optional[str] = 'us-central1',
optimization_objective: Optional[str] = None,
time_series_attribute_columns: Optional[list] = None,
context_window: Optional[int] = None,
quantiles: Optional[list] = None,
validation_options: Optional[str] = None,
labels: Optional[dict] = {},
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
budget_milli_node_hours: Optional[int] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[dict] = None,
model_id: Optional[str] = None,
parent_model: Optional[str] = None,
is_default_version: Optional[bool] = None,
model_version_aliases: Optional[list] = None,
model_version_description: Optional[str] = None,
hierarchy_group_columns: Optional[list] = None,
hierarchy_group_total_weight: Optional[float] = None,
hierarchy_temporal_total_weight: Optional[float] = None,
hierarchy_group_temporal_total_weight: Optional[float] = None,
window_column: Optional[str] = None,
window_stride_length: Optional[int] = None,
window_max_count: Optional[int] = None,
holiday_regions: Optional[list] = None,
column_specs: Optional[dict] = None,
column_transformations: Optional[list] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
weight_column: Optional[str] = None,
export_evaluated_data_items: Optional[bool] = False,
export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
export_evaluated_data_items_override_destination: Optional[bool] = None,
additional_experiments: Optional[list] = None,
):
# fmt: off
"""Runs the training job and returns a model.
If training on a Vertex AI dataset, you can use one of the following split configurations: Data fraction splits: Any of `training_fraction_split`, `validation_fraction_split` and `test_fraction_split` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data will be used for training, 10% for validation, and 10% for test. Predefined splits: Assigns input data to training, validation, and test sets based on the value of a provided key. If using predefined splits, `predefined_split_column_name` must be provided. Supported only for tabular Datasets. Timestamp splits: Assigns input data to training, validation, and test sets based on a provided timestamps. The youngest data pieces are assigned to training set, next to validation set, and the oldest to the test set. Supported only for tabular Datasets.
Args:
dataset: The dataset within the same Project from which data will be used to train the Model. The Dataset must use schema compatible with Model being trained, and what is compatible should be described in the used TrainingPipeline's [training_task_definition] [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For time series Datasets, all their data is exported to training, to pick and choose from.
target_column: Name of the column that the Model is to predict values for. This column must be unavailable at forecast.
time_column: Name of the column that identifies time order in the time series. This column must be available at forecast.
time_series_identifier_column: Name of the column that identifies the time series.
unavailable_at_forecast_columns: Column names of columns that are unavailable at forecast. Each column contains information for the given entity (identified by the [time_series_identifier_column]) that is unknown before the forecast (e.g. population of a city in a given year, or weather on a given day).
available_at_forecast_columns: Column names of columns that are available at forecast. Each column contains information for the given entity (identified by the [time_series_identifier_column]) that is known at forecast.
forecast_horizon: The amount of time into the future for which forecasted values for the target are returned. Expressed in number of units defined by the [data_granularity_unit] and [data_granularity_count] field. Inclusive.
data_granularity_unit: The data granularity unit. Accepted values are `minute`, `hour`, `day`, `week`, `month`, `year`.
data_granularity_count: The number of data granularity units between data points in the training data. If [data_granularity_unit] is `minute`, can be 1, 5, 10, 15, or 30. For all other values of [data_granularity_unit], must be 1.
training_fraction_split: The fraction of the input data that is to be used to train the Model. This is ignored if Dataset is not provided.
validation_fraction_split: The fraction of the input data that is to be used to validate the Model. This is ignored if Dataset is not provided.
test_fraction_split: The fraction of the input data that is to be used to evaluate the Model. This is ignored if Dataset is not provided.
predefined_split_column_name: The key is a name of one of the Dataset's data columns. The value of the key (either the label's value or value in the column) must be one of {`TRAIN`, `VALIDATE`, `TEST`}, and it defines to which set the given piece of data is assigned. If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only for tabular and time series Datasets.
    timestamp_split_column_name: The key is a name of one of the Dataset's data columns. The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only for tabular and time series Datasets. This parameter must be used with training_fraction_split, validation_fraction_split, and test_fraction_split.
weight_column: Name of the column that should be used as the weight column. Higher values in this column give more importance to the row during Model training. The column must have numeric values between 0 and 10000 inclusively, and 0 value means that the row is ignored. If the weight column field is not set, then all rows are assumed to have equal weight of 1.
time_series_attribute_columns: Column names that should be used as attribute columns. Each column is constant within a time series.
context_window: The amount of time into the past training and prediction data is used for model training and prediction respectively. Expressed in number of units defined by the [data_granularity_unit] and [data_granularity_count] fields. When not provided uses the default value of 0 which means the model sets each series context window to be 0 (also known as "cold start"). Inclusive.
export_evaluated_data_items: Whether to export the test set predictions to a BigQuery table. If False, then the export is not performed.
export_evaluated_data_items_bigquery_destination_uri: URI of desired destination BigQuery table for exported test set predictions. Expected format: `bq://<project_id>:<dataset_id>:<table>` If not specified, then results are exported to the following auto-created BigQuery table: `<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples` Applies only if [export_evaluated_data_items] is True.
export_evaluated_data_items_override_destination: Whether to override the contents of [export_evaluated_data_items_bigquery_destination_uri], if the table exists, for exported test set predictions. If False, and the table exists, then the training job will fail. Applies only if [export_evaluated_data_items] is True and [export_evaluated_data_items_bigquery_destination_uri] is specified.
quantiles: Quantiles to use for the `minimize-quantile-loss` [AutoMLForecastingTrainingJob.optimization_objective]. This argument is required in this case. Accepts up to 5 quantiles in the form of a double from 0 to 1, exclusive. Each quantile must be unique.
    validation_options: Validation options for the data validation component. The available options are: "fail-pipeline" - (default) run the validation and fail the pipeline if validation fails. "ignore-validation" - ignore the results of the validation and continue the pipeline.
budget_milli_node_hours: The train budget of creating this Model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - at the backend's discretion. This especially may happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train a Model for the given training set, the training won't be attempted and will error. The minimum value is 1000 and the maximum is 72000.
    model_display_name: The display name of the managed Vertex AI Model produced by this job, if any. The name can be up to 128 characters long and can consist of any UTF-8 characters. If not provided upon creation, the job's display_name is used.
model_labels: The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
model_id: The ID to use for the Model produced by this job, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen.
parent_model: The resource name or model ID of an existing model. The new model uploaded by this job will be a version of `parent_model`. Only set this field when training a new version of an existing model.
is_default_version: When set to True, the newly uploaded model version will automatically have alias "default" included. Subsequent uses of the model produced by this job without a version specified will use this "default" version. When set to False, the "default" alias will not be moved. Actions targeting the model version produced by this job will need to specifically reference this version by ID or alias. New model uploads, i.e. version 1, will always be "default" aliased.
model_version_aliases: User provided version aliases so that the model version uploaded by this job can be referenced via alias instead of auto-generated version ID. A default version alias will be created for the first version of the model. The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
model_version_description: The description of the model version being uploaded by this job.
hierarchy_group_columns: A list of time series attribute column names that define the time series hierarchy. Only one level of hierarchy is supported, ex. `region` for a hierarchy of stores or `department` for a hierarchy of products. If multiple columns are specified, time series will be grouped by their combined values, ex. (`blue`, `large`) for `color` and `size`, up to 5 columns are accepted. If no group columns are specified, all time series are considered to be part of the same group.
hierarchy_group_total_weight: The weight of the loss for predictions aggregated over time series in the same hierarchy group.
hierarchy_temporal_total_weight: The weight of the loss for predictions aggregated over the horizon for a single time series.
hierarchy_group_temporal_total_weight: The weight of the loss for predictions aggregated over both the horizon and time series in the same hierarchy group.
window_column: Name of the column that should be used to filter input rows. The column should contain either booleans or string booleans; if the value of the row is True, generate a sliding window from that row.
window_stride_length: Step length used to generate input examples. Every `window_stride_length` rows will be used to generate a sliding window.
window_max_count: Number of rows that should be used to generate input examples. If the total row count is larger than this number, the input data will be randomly sampled to hit the count.
holiday_regions: The geographical regions to use when creating holiday features. This option is only allowed when data_granularity_unit is `day`. Acceptable values can come from any of the following levels:
      Top level: GLOBAL
      Second level: continental regions
        NA: North America
        JAPAC: Japan and Asia Pacific
        EMEA: Europe, the Middle East and Africa
        LAC: Latin America and the Caribbean
      Third level: countries from ISO 3166-1 Country codes.
display_name: The user-defined name of this TrainingPipeline.
optimization_objective: Objective function the model is to be optimized towards. The training process creates a Model that optimizes the value of the objective function over the validation set. The supported optimization objectives: "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute error (MAE). "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE). "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE). "minimize-wape-mae" - Minimize the combination of weighted absolute percentage error (WAPE) and mean-absolute-error (MAE). "minimize-quantile-loss" - Minimize the quantile loss at the defined quantiles. (Set this objective to build quantile forecasts.)
    column_specs: Alternative to column_transformations where the keys of the dict are column names and their respective values are one of AutoMLTabularTrainingJob.column_data_types. When creating a transformation for a BigQuery Struct column, the column should be flattened using "." as the delimiter. Only columns with no child should have a transformation. If an input column has no transformations on it, such a column is ignored by the training, except for the targetColumn, which should have no transformations defined on it. Only one of column_transformations or column_specs should be passed.
    column_transformations: Transformations to apply to the input columns (i.e. columns other than the targetColumn). Each transformation may produce multiple result values from the column's value, and all are used for training. When creating a transformation for a BigQuery Struct column, the column should be flattened using "." as the delimiter. Only columns with no child should have a transformation. If an input column has no transformations on it, such a column is ignored by the training, except for the targetColumn, which should have no transformations defined on it. Only one of column_transformations or column_specs should be passed. Consider using column_specs as column_transformations will be deprecated eventually.
project: Project to retrieve dataset from.
location: Optional location to retrieve dataset from.
labels: The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
training_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the training pipeline. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if `model_to_upload` is not set separately. Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the model. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, the trained Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init.
    additional_experiments: Additional experiment flags for the time series forecasting training.
Returns:
model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model.
"""
# fmt: on
return dsl.ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-m',
'google_cloud_pipeline_components.container.v1.aiplatform.remote_runner',
'--cls_name',
'AutoMLForecastingTrainingJob',
'--method_name',
'run',
],
args=[
'--init.project',
project,
'--init.location',
location,
'--init.display_name',
display_name,
'--method.target_column',
target_column,
'--method.time_column',
time_column,
'--method.time_series_identifier_column',
time_series_identifier_column,
'--method.unavailable_at_forecast_columns',
unavailable_at_forecast_columns,
'--method.available_at_forecast_columns',
available_at_forecast_columns,
'--method.forecast_horizon',
forecast_horizon,
'--method.data_granularity_unit',
data_granularity_unit,
'--method.data_granularity_count',
data_granularity_count,
'--method.dataset',
dataset.metadata['resourceName'],
dsl.IfPresentPlaceholder(
input_name='optimization_objective',
then=['--init.optimization_objective', optimization_objective],
),
dsl.IfPresentPlaceholder(
input_name='training_encryption_spec_key_name',
then=[
'--init.training_encryption_spec_key_name',
training_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='model_encryption_spec_key_name',
then=[
'--init.model_encryption_spec_key_name',
model_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='context_window',
then=['--method.context_window', context_window],
),
dsl.IfPresentPlaceholder(
input_name='validation_options',
then=['--method.validation_options', validation_options],
),
dsl.IfPresentPlaceholder(
input_name='budget_milli_node_hours',
then=[
'--method.budget_milli_node_hours',
budget_milli_node_hours,
],
),
dsl.IfPresentPlaceholder(
input_name='model_display_name',
then=['--method.model_display_name', model_display_name],
),
dsl.IfPresentPlaceholder(
input_name='training_fraction_split',
then=[
'--method.training_fraction_split',
training_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='validation_fraction_split',
then=[
'--method.validation_fraction_split',
validation_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='test_fraction_split',
then=['--method.test_fraction_split', test_fraction_split],
),
dsl.IfPresentPlaceholder(
input_name='predefined_split_column_name',
then=[
'--method.predefined_split_column_name',
predefined_split_column_name,
],
),
dsl.IfPresentPlaceholder(
input_name='timestamp_split_column_name',
then=[
'--method.timestamp_split_column_name',
timestamp_split_column_name,
],
),
dsl.IfPresentPlaceholder(
input_name='weight_column',
then=['--method.weight_column', weight_column],
),
dsl.IfPresentPlaceholder(
input_name='export_evaluated_data_items',
then=[
'--method.export_evaluated_data_items',
export_evaluated_data_items,
],
),
dsl.IfPresentPlaceholder(
input_name='export_evaluated_data_items_bigquery_destination_uri',
then=[
'--method.export_evaluated_data_items_bigquery_destination_uri',
export_evaluated_data_items_bigquery_destination_uri,
],
),
dsl.IfPresentPlaceholder(
input_name='export_evaluated_data_items_override_destination',
then=[
'--method.export_evaluated_data_items_override_destination',
export_evaluated_data_items_override_destination,
],
),
dsl.IfPresentPlaceholder(
input_name='time_series_attribute_columns',
then=[
'--method.time_series_attribute_columns',
time_series_attribute_columns,
],
),
dsl.IfPresentPlaceholder(
input_name='quantiles', then=['--method.quantiles', quantiles]
),
dsl.IfPresentPlaceholder(
input_name='labels', then=['--init.labels', labels]
),
dsl.IfPresentPlaceholder(
input_name='model_labels',
then=['--method.model_labels', model_labels],
),
dsl.IfPresentPlaceholder(
input_name='model_id', then=['--method.model_id', model_id]
),
dsl.IfPresentPlaceholder(
input_name='parent_model',
then=['--method.parent_model', parent_model],
),
dsl.IfPresentPlaceholder(
input_name='is_default_version',
then=['--method.is_default_version', is_default_version],
),
dsl.IfPresentPlaceholder(
input_name='model_version_aliases',
then=['--method.model_version_aliases', model_version_aliases],
),
dsl.IfPresentPlaceholder(
input_name='model_version_description',
then=[
'--method.model_version_description',
model_version_description,
],
),
dsl.IfPresentPlaceholder(
input_name='hierarchy_group_columns',
then=[
'--method.hierarchy_group_columns',
hierarchy_group_columns,
],
),
dsl.IfPresentPlaceholder(
input_name='hierarchy_group_total_weight',
then=[
'--method.hierarchy_group_total_weight',
hierarchy_group_total_weight,
],
),
dsl.IfPresentPlaceholder(
input_name='hierarchy_temporal_total_weight',
then=[
'--method.hierarchy_temporal_total_weight',
hierarchy_temporal_total_weight,
],
),
dsl.IfPresentPlaceholder(
input_name='hierarchy_group_temporal_total_weight',
then=[
'--method.hierarchy_group_temporal_total_weight',
hierarchy_group_temporal_total_weight,
],
),
dsl.IfPresentPlaceholder(
input_name='window_column',
then=['--method.window_column', window_column],
),
dsl.IfPresentPlaceholder(
input_name='window_stride_length',
then=['--method.window_stride_length', window_stride_length],
),
dsl.IfPresentPlaceholder(
input_name='window_max_count',
then=['--method.window_max_count', window_max_count],
),
dsl.IfPresentPlaceholder(
input_name='holiday_regions',
then=['--method.holiday_regions', holiday_regions],
),
dsl.IfPresentPlaceholder(
input_name='column_specs',
then=['--init.column_specs', column_specs],
),
dsl.IfPresentPlaceholder(
input_name='column_transformations',
then=['--init.column_transformations', column_transformations],
),
dsl.IfPresentPlaceholder(
input_name='additional_experiments',
then=['--method.additional_experiments', additional_experiments],
),
'--executor_input',
'{{$}}',
'--resource_name_output_artifact_uri',
model.uri,
],
)
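
# A minimal usage sketch for this component, assuming the
# `TimeSeriesDatasetCreateOp` component from
# `google_cloud_pipeline_components.v1.dataset` is available with a
# `bq_source` argument; the project ID, BigQuery table, and column names are
# hypothetical placeholders, not values taken from this file.
#
# from google_cloud_pipeline_components.v1.dataset import TimeSeriesDatasetCreateOp
# from kfp import compiler, dsl
#
# @dsl.pipeline(name='automl-forecasting-train')
# def forecasting_pipeline(project: str = 'my-project'):
#   dataset_op = TimeSeriesDatasetCreateOp(
#       project=project,
#       display_name='sales-history',
#       bq_source='bq://my-project.sales.history',  # placeholder table
#   )
#   automl_forecasting_training_job(
#       project=project,
#       display_name='sales-forecast',
#       dataset=dataset_op.outputs['dataset'],
#       target_column='sales',
#       time_column='date',
#       time_series_identifier_column='store_id',
#       unavailable_at_forecast_columns=['sales'],
#       available_at_forecast_columns=['date'],
#       forecast_horizon=30,
#       data_granularity_unit='day',
#       data_granularity_count=1,
#       optimization_objective='minimize-rmse',
#   )
#
# The pipeline would then be compiled as usual, e.g.:
# compiler.Compiler().compile(forecasting_pipeline, 'forecasting_pipeline.yaml')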
| 823 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_forecasting_training_job/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Forecasting Training Job Component."""
| 824 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_tabular_training_job/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components.types.artifact_types import VertexDataset
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import Input
from kfp.dsl import Output


@dsl.container_component
def automl_tabular_training_job(
project: str,
display_name: str,
optimization_prediction_type: str,
dataset: Input[VertexDataset],
target_column: str,
model: Output[VertexModel],
location: Optional[str] = 'us-central1',
optimization_objective: Optional[str] = None,
column_specs: Optional[dict] = None,
column_transformations: Optional[list] = None,
optimization_objective_recall_value: Optional[float] = None,
optimization_objective_precision_value: Optional[float] = None,
labels: Optional[dict] = {},
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
training_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
predefined_split_column_name: Optional[str] = None,
timestamp_split_column_name: Optional[str] = None,
weight_column: Optional[str] = None,
budget_milli_node_hours: Optional[int] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[dict] = None,
model_id: Optional[str] = None,
parent_model: Optional[str] = None,
is_default_version: Optional[bool] = None,
model_version_aliases: Optional[list] = None,
model_version_description: Optional[str] = None,
disable_early_stopping: Optional[bool] = False,
export_evaluated_data_items: Optional[bool] = False,
export_evaluated_data_items_bigquery_destination_uri: Optional[str] = None,
export_evaluated_data_items_override_destination: Optional[bool] = None,
):
# fmt: off
"""Runs the training job and returns a model.
  If training on a Vertex AI dataset, you can use one of the following split configurations: Data fraction splits: Any of `training_fraction_split`, `validation_fraction_split` and `test_fraction_split` may optionally be provided; if provided, they must sum to at most 1. If the provided fractions sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data will be used for training, 10% for validation, and 10% for test. Predefined splits: Assigns input data to training, validation, and test sets based on the value of a provided key. If using predefined splits, `predefined_split_column_name` must be provided. Supported only for tabular Datasets. Timestamp splits: Assigns input data to training, validation, and test sets based on a provided timestamp column. The youngest data pieces are assigned to the training set, the next to the validation set, and the oldest to the test set. Supported only for tabular Datasets.
Args:
dataset: The dataset within the same Project from which data will be used to train the Model. The Dataset must use schema compatible with Model being trained, and what is compatible should be described in the used TrainingPipeline's [training_task_definition] [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular Datasets, all their data is exported to training, to pick and choose from.
    target_column: The name of the column whose values the Model is to predict.
training_fraction_split: The fraction of the input data that is to be used to train the Model. This is ignored if Dataset is not provided.
validation_fraction_split: The fraction of the input data that is to be used to validate the Model. This is ignored if Dataset is not provided.
test_fraction_split: The fraction of the input data that is to be used to evaluate the Model. This is ignored if Dataset is not provided.
predefined_split_column_name: The key is a name of one of the Dataset's data columns. The value of the key (either the label's value or value in the column) must be one of {`training`, `validation`, `test`}, and it defines to which set the given piece of data is assigned. If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only for tabular and time series Datasets.
    timestamp_split_column_name: The key is a name of one of the Dataset's data columns. The values of the key (the values in the column) must be in RFC 3339 `date-time` format, where `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or has an invalid value, that piece is ignored by the pipeline. Supported only for tabular and time series Datasets. This parameter must be used with training_fraction_split, validation_fraction_split and test_fraction_split.
weight_column: Name of the column that should be used as the weight column. Higher values in this column give more importance to the row during Model training. The column must have numeric values between 0 and 10000 inclusively, and 0 value means that the row is ignored. If the weight column field is not set, then all rows are assumed to have equal weight of 1.
budget_milli_node_hours: The train budget of creating this Model, expressed in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - at the backend's discretion. This especially may happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train a Model for the given training set, the training won't be attempted and will error. The minimum value is 1000 and the maximum is 72000.
    model_display_name: The display name of the managed Vertex AI Model produced by this job, if any. The name can be up to 128 characters long and can consist of any UTF-8 characters. If not provided upon creation, the job's display_name is used.
model_labels: The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
model_id: The ID to use for the Model produced by this job, which will become the final component of the model resource name. This value may be up to 63 characters, and valid characters are `[a-z0-9_-]`. The first character cannot be a number or hyphen.
parent_model: The resource name or model ID of an existing model. The new model uploaded by this job will be a version of `parent_model`. Only set this field when training a new version of an existing model.
is_default_version: When set to True, the newly uploaded model version will automatically have alias "default" included. Subsequent uses of the model produced by this job without a version specified will use this "default" version. When set to False, the "default" alias will not be moved. Actions targeting the model version produced by this job will need to specifically reference this version by ID or alias. New model uploads, i.e. version 1, will always be "default" aliased.
model_version_aliases: User provided version aliases so that the model version uploaded by this job can be referenced via alias instead of auto-generated version ID. A default version alias will be created for the first version of the model. The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
model_version_description: The description of the model version being uploaded by this job.
    disable_early_stopping: If true, the entire budget is used. This disables the early stopping feature. By default, the early stopping feature is enabled, which means that training might stop before the entire training budget has been used, if further training no longer brings significant improvement to the model.
export_evaluated_data_items: Whether to export the test set predictions to a BigQuery table. If False, then the export is not performed.
export_evaluated_data_items_bigquery_destination_uri: URI of desired destination BigQuery table for exported test set predictions. Expected format: `bq://<project_id>:<dataset_id>:<table>` If not specified, then results are exported to the following auto-created BigQuery table: `<project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples` Applies only if [export_evaluated_data_items] is True.
export_evaluated_data_items_override_destination: Whether to override the contents of [export_evaluated_data_items_bigquery_destination_uri], if the table exists, for exported test set predictions. If False, and the table exists, then the training job will fail. Applies only if [export_evaluated_data_items] is True and [export_evaluated_data_items_bigquery_destination_uri] is specified.
display_name: The user-defined name of this TrainingPipeline.
optimization_prediction_type: The type of prediction the Model is to produce. "classification" - Predict one out of multiple target values is picked for each row. "regression" - Predict a value based on its relation to other values. This type is available only to columns that contain semantically numeric values, i.e. integers or floating point number, even if stored as e.g. strings.
    optimization_objective: Objective function the Model is to be optimized towards. The training task creates a Model that maximizes/minimizes the value of the objective function over the validation set. The supported optimization objectives depend on the prediction type, and in the case of classification also the number of distinct values in the target column (two distinct values -> binary, 3 or more distinct values -> multi class). If the field is not set, the default objective function is used. Classification: "maximize-au-roc" (default) - Maximize the area under the receiver operating characteristic (ROC) curve. "minimize-log-loss" - Minimize log loss. "maximize-au-prc" - Maximize the area under the precision-recall curve. "maximize-precision-at-recall" - Maximize precision for a specified recall value. "maximize-recall-at-precision" - Maximize recall for a specified precision value. Classification (multi class): "minimize-log-loss" (default) - Minimize log loss. Regression: "minimize-rmse" (default) - Minimize root-mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute error (MAE). "minimize-rmsle" - Minimize root-mean-squared log error (RMSLE).
    column_specs: Alternative to column_transformations where the keys of the dict are column names and their respective values are one of AutoMLTabularTrainingJob.column_data_types. When creating a transformation for a BigQuery Struct column, the column should be flattened using "." as the delimiter. Only columns with no child should have a transformation. If an input column has no transformations on it, such a column is ignored by the training, except for the targetColumn, which should have no transformations defined on it. Only one of column_transformations or column_specs should be passed.
    column_transformations: Transformations to apply to the input columns (i.e. columns other than the targetColumn). Each transformation may produce multiple result values from the column's value, and all are used for training. When creating a transformation for a BigQuery Struct column, the column should be flattened using "." as the delimiter. Only columns with no child should have a transformation. If an input column has no transformations on it, such a column is ignored by the training, except for the targetColumn, which should have no transformations defined on it. Only one of column_transformations or column_specs should be passed. Consider using column_specs as column_transformations will be deprecated eventually.
optimization_objective_recall_value: Required when maximize-precision-at-recall optimizationObjective was picked, represents the recall value at which the optimization is done. The minimum value is 0 and the maximum is 1.0.
optimization_objective_precision_value: Required when maximize-recall-at-precision optimizationObjective was picked, represents the precision value at which the optimization is done. The minimum value is 0 and the maximum is 1.0.
project: Project to retrieve dataset from.
location: Optional location to retrieve dataset from.
labels: The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
training_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the training pipeline. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if `model_to_upload` is not set separately. Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the model. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, the trained Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init.
Returns:
model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model.
"""
# fmt: on
return dsl.ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-m',
'google_cloud_pipeline_components.container.v1.aiplatform.remote_runner',
'--cls_name',
'AutoMLTabularTrainingJob',
'--method_name',
'run',
],
args=[
'--init.project',
project,
'--init.location',
location,
'--init.display_name',
display_name,
'--init.optimization_prediction_type',
optimization_prediction_type,
'--method.dataset',
dataset.metadata['resourceName'],
'--method.target_column',
target_column,
dsl.IfPresentPlaceholder(
input_name='optimization_objective',
then=['--init.optimization_objective', optimization_objective],
),
dsl.IfPresentPlaceholder(
input_name='column_specs',
then=['--init.column_specs', column_specs],
),
dsl.IfPresentPlaceholder(
input_name='column_transformations',
then=['--init.column_transformations', column_transformations],
),
dsl.IfPresentPlaceholder(
input_name='optimization_objective_recall_value',
then=[
'--init.optimization_objective_recall_value',
optimization_objective_recall_value,
],
),
dsl.IfPresentPlaceholder(
input_name='optimization_objective_precision_value',
then=[
'--init.optimization_objective_precision_value',
optimization_objective_precision_value,
],
),
'--init.labels',
labels,
dsl.IfPresentPlaceholder(
input_name='training_encryption_spec_key_name',
then=[
'--init.training_encryption_spec_key_name',
training_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='model_encryption_spec_key_name',
then=[
'--init.model_encryption_spec_key_name',
model_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='training_fraction_split',
then=[
'--method.training_fraction_split',
training_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='validation_fraction_split',
then=[
'--method.validation_fraction_split',
validation_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='test_fraction_split',
then=['--method.test_fraction_split', test_fraction_split],
),
dsl.IfPresentPlaceholder(
input_name='predefined_split_column_name',
then=[
'--method.predefined_split_column_name',
predefined_split_column_name,
],
),
dsl.IfPresentPlaceholder(
input_name='timestamp_split_column_name',
then=[
'--method.timestamp_split_column_name',
timestamp_split_column_name,
],
),
dsl.IfPresentPlaceholder(
input_name='weight_column',
then=['--method.weight_column', weight_column],
),
dsl.IfPresentPlaceholder(
input_name='budget_milli_node_hours',
then=[
'--method.budget_milli_node_hours',
budget_milli_node_hours,
],
),
dsl.IfPresentPlaceholder(
input_name='model_display_name',
then=['--method.model_display_name', model_display_name],
),
dsl.IfPresentPlaceholder(
input_name='model_labels',
then=['--method.model_labels', model_labels],
),
dsl.IfPresentPlaceholder(
input_name='model_id', then=['--method.model_id', model_id]
),
dsl.IfPresentPlaceholder(
input_name='parent_model',
then=['--method.parent_model', parent_model],
),
dsl.IfPresentPlaceholder(
input_name='is_default_version',
then=['--method.is_default_version', is_default_version],
),
dsl.IfPresentPlaceholder(
input_name='model_version_aliases',
then=['--method.model_version_aliases', model_version_aliases],
),
dsl.IfPresentPlaceholder(
input_name='model_version_description',
then=[
'--method.model_version_description',
model_version_description,
],
),
'--method.disable_early_stopping',
disable_early_stopping,
'--method.export_evaluated_data_items',
export_evaluated_data_items,
dsl.IfPresentPlaceholder(
input_name='export_evaluated_data_items_bigquery_destination_uri',
then=[
'--method.export_evaluated_data_items_bigquery_destination_uri',
export_evaluated_data_items_bigquery_destination_uri,
],
),
dsl.IfPresentPlaceholder(
input_name='export_evaluated_data_items_override_destination',
then=[
'--method.export_evaluated_data_items_override_destination',
export_evaluated_data_items_override_destination,
],
),
'--executor_input',
'{{$}}',
'--resource_name_output_artifact_uri',
model.uri,
],
)
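
# A minimal usage sketch for this component, assuming the
# `TabularDatasetCreateOp` component from
# `google_cloud_pipeline_components.v1.dataset` is available with a
# `bq_source` argument; the project ID, BigQuery table, and column names are
# hypothetical placeholders. The example also shows explicit data-fraction
# splits, which must sum to at most 1.
#
# from google_cloud_pipeline_components.v1.dataset import TabularDatasetCreateOp
# from kfp import dsl
#
# @dsl.pipeline(name='automl-tabular-train')
# def tabular_pipeline(project: str = 'my-project'):
#   dataset_op = TabularDatasetCreateOp(
#       project=project,
#       display_name='churn-data',
#       bq_source='bq://my-project.crm.churn',  # placeholder table
#   )
#   automl_tabular_training_job(
#       project=project,
#       display_name='churn-classifier',
#       optimization_prediction_type='classification',
#       dataset=dataset_op.outputs['dataset'],
#       target_column='churned',
#       training_fraction_split=0.8,
#       validation_fraction_split=0.1,
#       test_fraction_split=0.1,
#       budget_milli_node_hours=1000,
#   )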
| 825 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_tabular_training_job/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Tabular Training Job Component."""
| 826 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_text_training_job/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components.types.artifact_types import VertexDataset
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import Input
from kfp.dsl import Output


@dsl.container_component
def automl_text_training_job(
project: str,
display_name: str,
dataset: Input[VertexDataset],
model: Output[VertexModel],
location: Optional[str] = 'us-central1',
prediction_type: Optional[str] = 'classification',
multi_label: Optional[bool] = False,
labels: Optional[dict] = {},
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
sentiment_max: Optional[int] = 10,
model_display_name: Optional[str] = None,
model_labels: Optional[dict] = None,
):
# fmt: off
"""Runs the training job and returns a model.
  If training on a Vertex AI dataset, you can use one of the following split configurations: Data fraction splits: Any of `training_fraction_split`, `validation_fraction_split` and `test_fraction_split` may optionally be provided; if provided, they must sum to at most 1. If the provided fractions sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data will be used for training, 10% for validation, and 10% for test. Data filter splits: Assigns input data to training, validation, and test sets based on the given filters; data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets.
Args:
dataset: The dataset within the same Project from which data will be used to train the Model. The Dataset must use schema compatible with Model being trained, and what is compatible should be described in the used TrainingPipeline's [training_task_definition] [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
training_fraction_split: The fraction of the input data that is to be used to train the Model. This is ignored if Dataset is not provided.
validation_fraction_split: The fraction of the input data that is to be used to validate the Model. This is ignored if Dataset is not provided.
test_fraction_split: The fraction of the input data that is to be used to evaluate the Model. This is ignored if Dataset is not provided.
model_display_name: The display name of the managed Vertex AI Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. If not provided upon creation, the job's display_name is used.
model_labels: The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
display_name: The user-defined name of this TrainingPipeline.
    prediction_type: The type of prediction the Model is to produce, one of: "classification" - A classification model analyzes text data and returns a list of categories that apply to the text found in the data. Vertex AI offers both single-label and multi-label text classification models. "extraction" - An entity extraction model inspects text data for known entities referenced in the data and labels those entities in the text. "sentiment" - A sentiment analysis model inspects text data and identifies the prevailing emotional opinion within it, especially to determine a writer's attitude as positive, negative, or neutral.
multi_label: Required and only applicable for text classification task. If false, a single-label (multi-class) Model will be trained (i.e. assuming that for each text snippet just up to one annotation may be applicable). If true, a multi-label Model will be trained (i.e. assuming that for each text snippet multiple annotations may be applicable).
sentiment_max: Required and only applicable for sentiment task. A sentiment is expressed as an integer ordinal, where higher value means a more positive sentiment. The range of sentiments that will be used is between 0 and sentimentMax (inclusive on both ends), and all the values in the range must be represented in the dataset before a model can be created. Only the Annotations with this sentimentMax will be used for training. sentimentMax value must be between 1 and 10 (inclusive).
project: Project to retrieve dataset from.
location: Optional location to retrieve dataset from.
labels: The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
training_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the training pipeline. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if `model_to_upload` is not set separately. Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the model. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, the trained Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init.
Returns:
model: The trained Vertex AI Model resource.
"""
# fmt: on
return dsl.ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-m',
'google_cloud_pipeline_components.container.v1.aiplatform.remote_runner',
'--cls_name',
'AutoMLTextTrainingJob',
'--method_name',
'run',
],
args=[
'--init.project',
project,
'--init.location',
location,
'--init.display_name',
display_name,
'--init.prediction_type',
prediction_type,
'--init.multi_label',
multi_label,
'--init.labels',
labels,
'--init.sentiment_max',
sentiment_max,
'--method.dataset',
dataset.metadata['resourceName'],
dsl.IfPresentPlaceholder(
input_name='training_encryption_spec_key_name',
then=[
'--init.training_encryption_spec_key_name',
training_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='model_encryption_spec_key_name',
then=[
'--init.model_encryption_spec_key_name',
model_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='model_display_name',
then=['--method.model_display_name', model_display_name],
),
dsl.IfPresentPlaceholder(
input_name='training_fraction_split',
then=[
'--method.training_fraction_split',
training_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='validation_fraction_split',
then=[
'--method.validation_fraction_split',
validation_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='test_fraction_split',
then=['--method.test_fraction_split', test_fraction_split],
),
dsl.IfPresentPlaceholder(
input_name='model_labels',
then=['--method.model_labels', model_labels],
),
'--executor_input',
'{{$}}',
'--resource_name_output_artifact_uri',
model.uri,
],
)
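
# A minimal usage sketch for this component, assuming the
# `TextDatasetCreateOp` component from
# `google_cloud_pipeline_components.v1.dataset` is available; the project ID,
# GCS paths, and import schema URI are hypothetical placeholders for a
# single-label text classification training run.
#
# from google_cloud_pipeline_components.v1.dataset import TextDatasetCreateOp
# from kfp import dsl
#
# @dsl.pipeline(name='automl-text-train')
# def text_pipeline(project: str = 'my-project'):
#   dataset_op = TextDatasetCreateOp(
#       project=project,
#       display_name='support-tickets',
#       gcs_source='gs://my-bucket/text/import.jsonl',  # placeholder path
#       import_schema_uri='gs://google-cloud-aiplatform/schema/dataset/ioformat/text_classification_single_label_io_format_1.0.0.yaml',  # assumed schema
#   )
#   automl_text_training_job(
#       project=project,
#       display_name='ticket-classifier',
#       dataset=dataset_op.outputs['dataset'],
#       prediction_type='classification',
#       multi_label=False,
#   )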
| 827 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_text_training_job/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Text Training Job Component."""
| 828 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_video_training_job/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components.types.artifact_types import VertexDataset
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import Input
from kfp.dsl import Output


@dsl.container_component
def automl_video_training_job(
project: str,
display_name: str,
dataset: Input[VertexDataset],
model: Output[VertexModel],
location: Optional[str] = 'us-central1',
prediction_type: Optional[str] = 'classification',
model_type: Optional[str] = 'CLOUD',
labels: Optional[dict] = {},
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
training_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[dict] = None,
):
# fmt: off
"""Runs the AutoML Video training job and returns a model.
  If training on a Vertex AI dataset, you can use one of the following split configurations: Data fraction splits: `training_fraction_split` and `test_fraction_split` may optionally be provided; if provided, they must sum to at most 1. If none of the fractions are set, by default roughly 80% of data will be used for training, and 20% for test. Data filter splits: Assigns input data to training, validation, and test sets based on the given filters; data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. If any of the filters in this message are to match nothing, then they can be set as '-' (the minus sign). Supported only for unstructured Datasets.
Args:
dataset: The dataset within the same Project from which data will be used to train the Model. The Dataset must use schema compatible with Model being trained, and what is compatible should be described in the used TrainingPipeline's [training_task_definition] [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular Datasets, all their data is exported to training, to pick and choose from.
training_fraction_split: The fraction of the input data that is to be used to train the Model. This is ignored if Dataset is not provided.
test_fraction_split: The fraction of the input data that is to be used to evaluate the Model. This is ignored if Dataset is not provided.
    model_display_name: The display name of the managed Vertex AI Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. If not provided upon creation, the job's display_name is used.
model_labels: The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
display_name: The user-defined name of this TrainingPipeline.
    prediction_type: The type of prediction the Model is to produce, one of: "classification" - A video classification model classifies shots and segments in your videos according to your own defined labels. "object_tracking" - A video object tracking model detects and tracks multiple objects in shots and segments. You can use these models to track objects in your videos according to your own pre-defined, custom labels. "action_recognition" - A video action recognition model pinpoints the location of actions with short temporal durations (~1 second).
    model_type: One of the following: "CLOUD" - available for "classification", "object_tracking" and "action_recognition" A Model best tailored to be used within Google Cloud, and which cannot be exported. "MOBILE_VERSATILE_1" - available for "classification", "object_tracking" and "action_recognition" A model that, in addition to being available within Google Cloud, can also be exported (see ModelService.ExportModel) as a TensorFlow or TensorFlow Lite model and used on a mobile or edge device afterwards. "MOBILE_CORAL_VERSATILE_1" - available only for "object_tracking" A versatile model that is meant to be exported (see ModelService.ExportModel) and used on a Google Coral device. "MOBILE_CORAL_LOW_LATENCY_1" - available only for "object_tracking" A model that trades off quality for low latency, to be exported (see ModelService.ExportModel) and used on a Google Coral device. "MOBILE_JETSON_VERSATILE_1" - available only for "object_tracking" A versatile model that is meant to be exported (see ModelService.ExportModel) and used on an NVIDIA Jetson device. "MOBILE_JETSON_LOW_LATENCY_1" - available only for "object_tracking" A model that trades off quality for low latency, to be exported (see ModelService.ExportModel) and used on an NVIDIA Jetson device.
project: Project to retrieve dataset from.
location: Optional location to retrieve dataset from.
labels: The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
training_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the training pipeline. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if `model_to_upload` is not set separately. Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the model. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, the trained Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init.
Returns:
model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model.
"""
  # fmt: on
return dsl.ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-m',
'google_cloud_pipeline_components.container.v1.aiplatform.remote_runner',
'--cls_name',
'AutoMLVideoTrainingJob',
'--method_name',
'run',
],
args=[
'--init.project',
project,
'--init.location',
location,
'--init.display_name',
display_name,
'--init.prediction_type',
prediction_type,
'--init.labels',
labels,
'--init.model_type',
model_type,
'--method.dataset',
dataset.metadata['resourceName'],
dsl.IfPresentPlaceholder(
input_name='training_encryption_spec_key_name',
then=[
'--init.training_encryption_spec_key_name',
training_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='model_encryption_spec_key_name',
then=[
'--init.model_encryption_spec_key_name',
model_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='model_display_name',
then=['--method.model_display_name', model_display_name],
),
dsl.IfPresentPlaceholder(
input_name='training_fraction_split',
then=[
'--method.training_fraction_split',
training_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='test_fraction_split',
then=['--method.test_fraction_split', test_fraction_split],
),
dsl.IfPresentPlaceholder(
input_name='model_labels',
then=['--method.model_labels', model_labels],
),
'--executor_input',
'{{$}}',
'--resource_name_output_artifact_uri',
model.uri,
],
)
| 829 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_video_training_job/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Video Training Job Component."""
| 830 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_image_training_job/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components.types.artifact_types import VertexDataset
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@dsl.container_component
def automl_image_training_job(
project: str,
display_name: str,
dataset: Input[VertexDataset],
model: Output[VertexModel],
gcp_resources: OutputPath(str),
location: Optional[str] = 'us-central1',
prediction_type: Optional[str] = 'classification',
multi_label: Optional[bool] = False,
model_type: Optional[str] = 'CLOUD',
base_model: Optional[Input[VertexModel]] = None,
incremental_train_base_model: Optional[Input[VertexModel]] = None,
parent_model: Optional[Input[VertexModel]] = None,
is_default_version: Optional[bool] = True,
model_version_aliases: Optional[List[str]] = None,
model_version_description: Optional[str] = None,
labels: Optional[Dict[str, str]] = {},
training_encryption_spec_key_name: Optional[str] = None,
model_encryption_spec_key_name: Optional[str] = None,
training_fraction_split: Optional[float] = None,
validation_fraction_split: Optional[float] = None,
test_fraction_split: Optional[float] = None,
training_filter_split: Optional[str] = None,
validation_filter_split: Optional[str] = None,
test_filter_split: Optional[str] = None,
budget_milli_node_hours: Optional[int] = None,
model_display_name: Optional[str] = None,
model_labels: Optional[Dict[str, str]] = None,
disable_early_stopping: Optional[bool] = False,
):
# fmt: off
"""Runs the AutoML Image training job and returns a model.
  If training on a Vertex AI dataset, you can use one of the following split configurations: Data fraction splits: Any of `training_fraction_split`, `validation_fraction_split` and `test_fraction_split` may optionally be provided; their sum must be at most 1. If the provided fractions sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of the data will be used for training, 10% for validation, and 10% for test. Data filter splits: Assigns input data to training, validation, and test sets based on the given filters; data pieces not matched by any filter are ignored. Currently only supported for Datasets containing DataItems. To make a filter match nothing, set it to '-' (the minus sign). If using filter splits, all of `training_filter_split`, `validation_filter_split` and `test_filter_split` must be provided. Supported only for unstructured Datasets. See the Example section below for both configurations.
Args:
dataset: The dataset within the same Project from which data will be used to train the Model. The Dataset must use schema compatible with Model being trained, and what is compatible should be described in the used TrainingPipeline's [training_task_definition] [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. For tabular Datasets, all their data is exported to training, to pick and choose from.
training_fraction_split: The fraction of the input data that is to be used to train the Model. This is ignored if Dataset is not provided.
validation_fraction_split: The fraction of the input data that is to be used to validate the Model. This is ignored if Dataset is not provided.
test_fraction_split: The fraction of the input data that is to be used to evaluate the Model. This is ignored if Dataset is not provided.
    training_filter_split: A filter on DataItems of the Dataset. DataItems that match this filter are used to train the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. Example usage: training_filter_split="labels.aiplatform.googleapis.com/ml_use=training".
    validation_filter_split: A filter on DataItems of the Dataset. DataItems that match this filter are used to validate the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. Example usage: validation_filter_split="labels.aiplatform.googleapis.com/ml_use=validation".
    test_filter_split: A filter on DataItems of the Dataset. DataItems that match this filter are used to test the Model. A filter with the same syntax as the one used in DatasetService.ListDataItems may be used. If a single DataItem is matched by more than one of the FilterSplit filters, then it is assigned to the first set that applies to it in the training, validation, test order. This is ignored if Dataset is not provided. Example usage: test_filter_split="labels.aiplatform.googleapis.com/ml_use=test".
    budget_milli_node_hours: The training budget for creating this Model, expressed in milli node hours, i.e. a value of 1,000 in this field means 1 node hour. Defaults by `prediction_type`: `classification` - For Cloud models the budget must be: 8,000 - 800,000 milli node hours (inclusive). The default value is 192,000 which represents one day in wall time, assuming 8 nodes are used. `object_detection` - For Cloud models the budget must be: 20,000 - 900,000 milli node hours (inclusive). The default value is 216,000 which represents one day in wall time, assuming 9 nodes are used. The training cost of the model will not exceed this budget. The backend will attempt to keep the final cost close to the budget, though it may end up noticeably smaller, at the backend's discretion. This may especially happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train a Model for the given training set, the training won't be attempted and will error.
    model_display_name: The display name of the managed Vertex AI Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. If not provided upon creation, the job's display_name is used.
model_labels: The labels with user-defined metadata to organize your Models. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
    disable_early_stopping: If true, the entire budget is used. This disables the early stopping feature. By default, the early stopping feature is enabled, which means that training might stop before the entire training budget has been used, if further training no longer brings significant improvement to the model.
display_name: The user-defined name of this TrainingPipeline.
    prediction_type: The type of prediction the Model is to produce, one of: "classification" - A model that classifies images according to the labels you define; whether a single label or multiple labels are predicted per image is controlled by `multi_label`. "object_detection" - A model that detects objects in images, returning a bounding box and label for each detected object.
    multi_label: Default is False. If false, a single-label (multi-class) Model will be trained (i.e. assuming that for each image at most one annotation may be applicable). If true, a multi-label Model will be trained (i.e. assuming that for each image multiple annotations may be applicable). This is only applicable for the "classification" prediction_type and will be ignored otherwise.
    model_type: One of the following: "CLOUD" - Default for Image Classification. A Model best tailored to be used within Google Cloud, and which cannot be exported. "CLOUD_HIGH_ACCURACY_1" - Default for Image Object Detection. A model best tailored to be used within Google Cloud, and which cannot be exported. Expected to have a higher latency, but should also have a higher prediction quality than other cloud models. "CLOUD_LOW_LATENCY_1" - A model best tailored to be used within Google Cloud, and which cannot be exported. Expected to have a low latency, but may have lower prediction quality than other cloud models. "MOBILE_TF_LOW_LATENCY_1" - A model that, in addition to being available within Google Cloud, can also be exported as a TensorFlow or Core ML model and used on a mobile or edge device afterwards. Expected to have low latency, but may have lower prediction quality than other mobile models. "MOBILE_TF_VERSATILE_1" - A model that, in addition to being available within Google Cloud, can also be exported as a TensorFlow or Core ML model and used on a mobile or edge device afterwards. "MOBILE_TF_HIGH_ACCURACY_1" - A model that, in addition to being available within Google Cloud, can also be exported as a TensorFlow or Core ML model and used on a mobile or edge device afterwards. Expected to have a higher latency, but should also have a higher prediction quality than other mobile models.
base_model: Only permitted for Image Classification models. If it is specified, the new model will be trained based on the `base` model. Otherwise, the new model will be trained from scratch. The `base` model must be in the same Project and Location as the new Model to train, and have the same model_type.
incremental_train_base_model: Optional for both Image Classification and Object detection models, to incrementally train a new model using an existing model as the starting point, with a reduced training time. If not specified, the new model will be trained from scratch. The `base` model must be in the same Project and Location as the new Model to train, and have the same prediction_type and model_type.
parent_model: The resource name or model ID of an existing model. The new model uploaded by this job will be a version of `parent_model`. Only set this field when training a new version of an existing model.
is_default_version: When set to True, the newly uploaded model version will automatically have alias "default" included. Subsequent uses of the model produced by this job without a version specified will use this "default" version. When set to False, the "default" alias will not be moved. Actions targeting the model version produced by this job will need to specifically reference this version by ID or alias. New model uploads, i.e. version 1, will always be "default" aliased.
model_version_aliases: User provided version aliases so that the model version uploaded by this job can be referenced via alias instead of auto-generated version ID. A default version alias will be created for the first version of the model. The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
model_version_description: The description of the model version being uploaded by this job.
project: Project to retrieve dataset from.
location: Optional location to retrieve dataset from.
labels: The labels with user-defined metadata to organize TrainingPipelines. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
training_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the training pipeline. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, this TrainingPipeline will be secured by this key. Note: Model trained by this TrainingPipeline is also secured by this key if `model_to_upload` is not set separately. Overrides encryption_spec_key_name set in aiplatform.init.
model_encryption_spec_key_name: The Cloud KMS resource identifier of the customer managed encryption key used to protect the model. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. If set, the trained Model will be secured by this key. Overrides encryption_spec_key_name set in aiplatform.init.
Returns:
model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model.
gcp_resources: Serialized gcp_resources proto tracking the batch prediction job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
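  Example:
      An illustrative sketch of the two split configurations described above,
      as this component might be invoked inside a `dsl.pipeline` function; the
      project, display names, and `dataset_task` artifact are placeholders.

        # Fraction splits: the three fractions must sum to at most 1.
        automl_image_training_job(
            project='my-project',
            display_name='train-image-classification',
            dataset=dataset_task.outputs['dataset'],
            training_fraction_split=0.8,
            validation_fraction_split=0.1,
            test_fraction_split=0.1,
        )

        # Filter splits: all three filters must be provided; use '-' for a
        # filter that should match nothing.
        automl_image_training_job(
            project='my-project',
            display_name='train-image-object-detection',
            prediction_type='object_detection',
            dataset=dataset_task.outputs['dataset'],
            training_filter_split='labels.aiplatform.googleapis.com/ml_use=training',
            validation_filter_split='labels.aiplatform.googleapis.com/ml_use=validation',
            test_filter_split='labels.aiplatform.googleapis.com/ml_use=test',
        )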
"""
# fmt: on
return dsl.ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-m',
'google_cloud_pipeline_components.container.v1.automl_training_job.image.launcher',
],
args=[
'--type',
'AutoMLImageTrainingJob',
'--project',
project,
'--location',
location,
'--display_name',
display_name,
'--prediction_type',
prediction_type,
'--multi_label',
multi_label,
'--model_type',
model_type,
'--labels',
labels,
'--dataset',
dataset.metadata['resourceName'],
'--disable_early_stopping',
disable_early_stopping,
dsl.IfPresentPlaceholder(
input_name='training_encryption_spec_key_name',
then=[
'--training_encryption_spec_key_name',
training_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='model_encryption_spec_key_name',
then=[
'--model_encryption_spec_key_name',
model_encryption_spec_key_name,
],
),
dsl.IfPresentPlaceholder(
input_name='model_display_name',
then=['--model_display_name', model_display_name],
),
dsl.IfPresentPlaceholder(
input_name='training_fraction_split',
then=[
'--training_fraction_split',
training_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='validation_fraction_split',
then=[
'--validation_fraction_split',
validation_fraction_split,
],
),
dsl.IfPresentPlaceholder(
input_name='test_fraction_split',
then=['--test_fraction_split', test_fraction_split],
),
dsl.IfPresentPlaceholder(
input_name='budget_milli_node_hours',
then=[
'--budget_milli_node_hours',
budget_milli_node_hours,
],
),
dsl.IfPresentPlaceholder(
input_name='training_filter_split',
then=['--training_filter_split', training_filter_split],
),
dsl.IfPresentPlaceholder(
input_name='validation_filter_split',
then=[
'--validation_filter_split',
validation_filter_split,
],
),
dsl.IfPresentPlaceholder(
input_name='test_filter_split',
then=['--test_filter_split', test_filter_split],
),
dsl.IfPresentPlaceholder(
input_name='base_model',
then=[
'--base_model',
base_model.metadata['resourceName'],
'--model_labels',
base_model.metadata['labels'],
],
else_=[
dsl.IfPresentPlaceholder(
input_name='model_labels',
then=['--model_labels', model_labels],
)
],
),
dsl.IfPresentPlaceholder(
input_name='incremental_train_base_model',
then=[
'--incremental_train_base_model',
incremental_train_base_model.metadata['resourceName'],
],
),
dsl.IfPresentPlaceholder(
input_name='parent_model',
then=[
'--parent_model',
parent_model.metadata['resourceName'],
],
),
dsl.IfPresentPlaceholder(
input_name='is_default_version',
then=[
'--is_default_version',
is_default_version,
],
),
dsl.IfPresentPlaceholder(
input_name='model_version_aliases',
then=[
'--model_version_aliases',
model_version_aliases,
],
),
dsl.IfPresentPlaceholder(
input_name='model_version_description',
then=[
'--model_version_description',
model_version_description,
],
),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
'--resource_name_output_artifact_uri',
model.uri,
],
)
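# The nested placeholder above (model_labels forwarded from base_model when a
# base model is given, and from the pipeline input otherwise) relies on
# `dsl.IfPresentPlaceholder` supporting an `else_` branch that may itself hold
# further placeholders. A minimal, self-contained sketch of that pattern is
# shown below as comments; `echo_flags` is a hypothetical component and not
# part of this package.
#
#   from typing import Optional
#   from kfp import dsl
#
#   @dsl.container_component
#   def echo_flags(required_arg: str, optional_arg: Optional[str] = None):
#     return dsl.ContainerSpec(
#         image='alpine',
#         command=['echo'],
#         args=[
#             '--required', required_arg,
#             dsl.IfPresentPlaceholder(
#                 input_name='optional_arg',
#                 then=['--optional', optional_arg],
#                 else_=['--optional', 'fallback-value'],
#             ),
#         ],
#     )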
| 831 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/training_job/automl_image_training_job/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML Image Training Job Component."""
| 832 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_train_pipeline.yaml | # PIPELINE DEFINITION
# Name: automl-tabular-bqml-arima-train
# Description: Trains a BQML ARIMA_PLUS model.
# Inputs:
# bigquery_destination_uri: str [Default: '']
# data_granularity_unit: str
# data_source_bigquery_table_path: str [Default: '']
# data_source_csv_filenames: str [Default: '']
# encryption_spec_key_name: str [Default: '']
# forecast_horizon: int
# location: str
# max_order: int [Default: 5.0]
# override_destination: bool [Default: False]
# predefined_split_key: str [Default: '']
# project: str
# root_dir: str
# run_evaluation: bool [Default: True]
# target_column: str
# test_fraction: float [Default: -1.0]
# time_column: str
# time_series_identifier_column: str
# timestamp_split_key: str [Default: '']
# training_fraction: float [Default: -1.0]
# validation_fraction: float [Default: -1.0]
# window_column: str [Default: '']
# window_max_count: int [Default: -1.0]
# window_stride_length: int [Default: -1.0]
# Outputs:
# create-metrics-artifact-evaluation_metrics: system.Metrics
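# The sketch below is an illustrative, non-authoritative example of submitting
# this compiled pipeline with the google-cloud-aiplatform SDK; the project,
# region, bucket, and BigQuery table are placeholders.
#
#   from google.cloud import aiplatform
#
#   aiplatform.init(project='my-project', location='us-central1')
#   job = aiplatform.PipelineJob(
#       display_name='automl-tabular-bqml-arima-train',
#       template_path='bqml_arima_train_pipeline.yaml',
#       parameter_values={
#           'project': 'my-project',
#           'location': 'us-central1',
#           'root_dir': 'gs://my-bucket/pipeline_root',
#           'data_source_bigquery_table_path': 'bq://my-project.my_dataset.sales',
#           'target_column': 'sales',
#           'time_column': 'date',
#           'time_series_identifier_column': 'store_id',
#           'data_granularity_unit': 'day',
#           'forecast_horizon': 30,
#       },
#   )
#   job.run()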
components:
comp-bigquery-create-dataset:
executorLabel: exec-bigquery-create-dataset
inputDefinitions:
parameters:
dataset:
parameterType: STRING
exists_ok:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
location:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
comp-bigquery-create-dataset-2:
executorLabel: exec-bigquery-create-dataset-2
inputDefinitions:
parameters:
dataset:
parameterType: STRING
exists_ok:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
location:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
comp-bigquery-create-model-job:
executorLabel: exec-bigquery-create-model-job
inputDefinitions:
parameters:
job_configuration_query:
defaultValue: {}
description: 'A json formatted string describing the rest of the job configuration.
For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: "The labels associated with this job. You can\nuse these to\
\ organize and group your jobs. Label keys and values can\nbe no longer\
            \ than 63 characters, can only contain lowercase letters,\nnumeric characters,\
\ underscores and dashes. International characters\nare allowed. Label\
\ values are optional. Label keys must start with a\nletter and each label\
\ in the list must have a different key.\n Example: { \"name\": \"wrench\"\
, \"mass\": \"1.3kg\", \"count\": \"3\" }."
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Location of the job to create the BigQuery model. If not set,
default to
`US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run BigQuery model creation job. Defaults to the
project in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
description: 'SQL query text to execute. Only standard SQL is
supported. If query are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
parameterType: STRING
query_parameters:
defaultValue: []
description: 'Query parameters for standard SQL queries.
If query_parameters are both specified in here and in
job_configuration_query, the value in here will override the other one.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.BQMLModel
schemaVersion: 0.0.1
description: Describes the model which is created.
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-bigquery-delete-dataset-with-prefix:
executorLabel: exec-bigquery-delete-dataset-with-prefix
inputDefinitions:
parameters:
dataset_prefix:
parameterType: STRING
delete_contents:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
project:
parameterType: STRING
comp-bigquery-list-rows:
executorLabel: exec-bigquery-list-rows
inputDefinitions:
artifacts:
table:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: A google.BQTable artifact.
parameters:
location:
description: The GCP region.
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: LIST
comp-bigquery-list-rows-2:
executorLabel: exec-bigquery-list-rows-2
inputDefinitions:
artifacts:
table:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: A google.BQTable artifact.
parameters:
location:
description: The GCP region.
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: LIST
comp-bigquery-query-job:
executorLabel: exec-bigquery-query-job
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
description: 'Describes the Cloud
KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your
project requires access to this encryption key. If
encryption_spec_key_name are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
description: 'A json formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Location for creating the BigQuery job. If not
set, default to `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
description: 'SQL query text to execute. Only standard SQL is
supported. If query are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
description: 'jobs.query parameters for
standard SQL queries. If query_parameters are both specified in here
and in job_configuration_query, the value in here will override the
other one.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-bigquery-query-job-2:
executorLabel: exec-bigquery-query-job-2
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
description: 'Describes the Cloud
KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your
project requires access to this encryption key. If
encryption_spec_key_name are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
description: 'A json formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Location for creating the BigQuery job. If not
set, default to `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
description: 'SQL query text to execute. Only standard SQL is
supported. If query are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
description: 'jobs.query parameters for
standard SQL queries. If query_parameters are both specified in here
and in job_configuration_query, the value in here will override the
other one.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-bigquery-query-job-3:
executorLabel: exec-bigquery-query-job-3
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
description: 'Describes the Cloud
KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your
project requires access to this encryption key. If
encryption_spec_key_name are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
description: 'A json formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Location for creating the BigQuery job. If not
set, default to `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
description: 'SQL query text to execute. Only standard SQL is
supported. If query are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
description: 'jobs.query parameters for
standard SQL queries. If query_parameters are both specified in here
and in job_configuration_query, the value in here will override the
other one.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-bigquery-query-job-4:
executorLabel: exec-bigquery-query-job-4
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
description: 'Describes the Cloud
KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your
project requires access to this encryption key. If
encryption_spec_key_name are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
description: 'A json formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Location for creating the BigQuery job. If not
set, default to `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
description: 'SQL query text to execute. Only standard SQL is
supported. If query are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
description: 'jobs.query parameters for
standard SQL queries. If query_parameters are both specified in here
and in job_configuration_query, the value in here will override the
other one.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-bigquery-query-job-5:
executorLabel: exec-bigquery-query-job-5
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
description: 'Describes the Cloud
KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your
project requires access to this encryption key. If
encryption_spec_key_name are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
description: 'A json formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Location for creating the BigQuery job. If not
set, default to `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
description: 'SQL query text to execute. Only standard SQL is
supported. If query are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
description: 'jobs.query parameters for
standard SQL queries. If query_parameters are both specified in here
and in job_configuration_query, the value in here will override the
other one.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-build-job-configuration-query:
executorLabel: exec-build-job-configuration-query
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-build-job-configuration-query-2:
executorLabel: exec-build-job-configuration-query-2
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-build-job-configuration-query-3:
executorLabel: exec-build-job-configuration-query-3
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-build-job-configuration-query-4:
executorLabel: exec-build-job-configuration-query-4
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-build-job-configuration-query-5:
executorLabel: exec-build-job-configuration-query-5
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-build-job-configuration-query-6:
executorLabel: exec-build-job-configuration-query-6
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-build-serialized-query-parameters:
executorLabel: exec-build-serialized-query-parameters
inputDefinitions:
parameters:
data_granularity_unit:
description: 'The data granularity unit. Accepted values are:
minute, hour, day, week, month, year.'
isOptional: true
parameterType: STRING
forecast_horizon:
description: 'The number of time periods into the future for which
forecasts will be created. Future periods start after the latest timestamp
for each time series.'
isOptional: true
parameterType: NUMBER_INTEGER
forecast_horizon_off_by_one:
defaultValue: false
description: 'If True, subtract 1 from the forecast horizon
in the query parameters.'
isOptional: true
parameterType: BOOLEAN
max_order:
description: 'Integer between 1 and 5 representing the size of the parameter
search space for ARIMA_PLUS. 5 would result in the highest accuracy model,
but also the longest training runtime.'
isOptional: true
parameterType: NUMBER_INTEGER
splits:
description: Dataset splits to be used to train the model.
isOptional: true
parameterType: LIST
window:
description: 'Dict containing information about the forecast window the
model
should have. If no window is provided, the window will start after the
latest period in the available data.'
isOptional: true
parameterType: STRUCT
outputDefinitions:
parameters:
Output:
parameterType: LIST
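  # Illustrative note (not part of the compiled spec): the Output of
  # build-serialized-query-parameters is a LIST of BigQuery query parameters in
  # the REST API shape; the parameter names and values below are placeholders,
  # shown as a Python literal:
  #
  #   [{'name': 'forecast_horizon',
  #     'parameterType': {'type': 'INT64'},
  #     'parameterValue': {'value': '30'}},
  #    {'name': 'splits',
  #     'parameterType': {'type': 'ARRAY', 'arrayType': {'type': 'STRING'}},
  #     'parameterValue': {'arrayValues': [{'value': 'TEST'}]}}]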
comp-build-serialized-query-parameters-2:
executorLabel: exec-build-serialized-query-parameters-2
inputDefinitions:
parameters:
data_granularity_unit:
description: 'The data granularity unit. Accepted values are:
minute, hour, day, week, month, year.'
isOptional: true
parameterType: STRING
forecast_horizon:
description: 'The number of time periods into the future for which
forecasts will be created. Future periods start after the latest timestamp
for each time series.'
isOptional: true
parameterType: NUMBER_INTEGER
forecast_horizon_off_by_one:
defaultValue: false
description: 'If True, subtract 1 from the forecast horizon
in the query parameters.'
isOptional: true
parameterType: BOOLEAN
max_order:
description: 'Integer between 1 and 5 representing the size of the parameter
search space for ARIMA_PLUS. 5 would result in the highest accuracy model,
but also the longest training runtime.'
isOptional: true
parameterType: NUMBER_INTEGER
splits:
description: Dataset splits to be used to train the model.
isOptional: true
parameterType: LIST
window:
description: 'Dict containing information about the forecast window the
model
should have. If no window is provided, the window will start after the
latest period in the available data.'
isOptional: true
parameterType: STRUCT
outputDefinitions:
parameters:
Output:
parameterType: LIST
comp-build-serialized-query-parameters-3:
executorLabel: exec-build-serialized-query-parameters-3
inputDefinitions:
parameters:
data_granularity_unit:
description: 'The data granularity unit. Accepted values are:
minute, hour, day, week, month, year.'
isOptional: true
parameterType: STRING
forecast_horizon:
description: 'The number of time periods into the future for which
forecasts will be created. Future periods start after the latest timestamp
for each time series.'
isOptional: true
parameterType: NUMBER_INTEGER
forecast_horizon_off_by_one:
defaultValue: false
description: 'If True, subtract 1 from the forecast horizon
in the query parameters.'
isOptional: true
parameterType: BOOLEAN
max_order:
description: 'Integer between 1 and 5 representing the size of the parameter
search space for ARIMA_PLUS. 5 would result in the highest accuracy model,
but also the longest training runtime.'
isOptional: true
parameterType: NUMBER_INTEGER
splits:
description: Dataset splits to be used to train the model.
isOptional: true
parameterType: LIST
window:
description: 'Dict containing information about the forecast window the
model
should have. If no window is provided, the window will start after the
latest period in the available data.'
isOptional: true
parameterType: STRUCT
outputDefinitions:
parameters:
Output:
parameterType: LIST
comp-cond:
executorLabel: exec-cond
inputDefinitions:
parameters:
false_str:
parameterType: STRING
predicate:
parameterType: BOOLEAN
true_str:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-condition-2:
dag:
outputs:
artifacts:
create-metrics-artifact-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: evaluation_metrics
producerSubtask: create-metrics-artifact
tasks:
bigquery-list-rows:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-list-rows
dependentTasks:
- bigquery-query-job
inputs:
artifacts:
table:
taskOutputArtifact:
outputArtifactKey: destination_table
producerTask: bigquery-query-job
parameters:
location:
componentInputParameter: pipelinechannel--get-table-location-Output
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: bigquery-list-rows
bigquery-list-rows-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-list-rows-2
dependentTasks:
- bigquery-query-job-4
inputs:
artifacts:
table:
taskOutputArtifact:
outputArtifactKey: destination_table
producerTask: bigquery-query-job-4
parameters:
location:
componentInputParameter: pipelinechannel--get-table-location-Output
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: bigquery-list-rows-2
bigquery-query-job:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job
dependentTasks:
- build-job-configuration-query
- build-serialized-query-parameters
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query
location:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--bigquery-create-dataset-2-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-dataset_id
pipelinechannel--bigquery-create-dataset-2-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-project_id
pipelinechannel--data_granularity_unit:
componentInputParameter: pipelinechannel--data_granularity_unit
pipelinechannel--get-fte-suffix-Output:
componentInputParameter: pipelinechannel--get-fte-suffix-Output
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n WITH\n time_series_windows AS (\n \
\ SELECT\n FIRST_VALUE({{$.inputs.parameters['pipelinechannel--time_column']}})\
\ OVER (horizon) AS start_time,\n COUNT(*) OVER (horizon)\
\ AS count,\n FIRST_VALUE(window__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}})\
\ OVER (horizon) AS window__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}},\n\
\ FROM `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-dataset_id']}}.fte_time_series_output_{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}`\n\
\ WHERE UPPER(split__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}})\
\ IN UNNEST(@splits)\n WINDOW horizon AS (\n \
\ PARTITION BY {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\n\
\ ORDER BY {{$.inputs.parameters['pipelinechannel--time_column']}}\n\
\ ROWS BETWEEN 0 PRECEDING AND @forecast_horizon FOLLOWING)\n\
\ )\n SELECT\n start_time,\n TIMESTAMP(DATETIME_ADD(\n\
\ DATETIME(start_time),\n INTERVAL @forecast_horizon\
\ {{$.inputs.parameters['pipelinechannel--data_granularity_unit']}}\n\
\ )) AS end_time,\n SUM(count) AS count,\n \
\ ROW_NUMBER() OVER () AS window_number,\n FROM time_series_windows\n\
\ WHERE window__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}\n\
\ GROUP BY start_time\n "
query_parameters:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-serialized-query-parameters
taskInfo:
name: create-eval-windows-table
bigquery-query-job-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job-2
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--bigquery-create-dataset-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-dataset_id
pipelinechannel--bigquery-create-dataset-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-project_id
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n CREATE TABLE `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-dataset_id']}}.metrics`\
\ (\n predicted_on_{{$.inputs.parameters['pipelinechannel--time_column']}}\
\ TIMESTAMP,\n MAE FLOAT64,\n MSE\
\ FLOAT64,\n MAPE FLOAT64,\n prediction_count\
\ INT64\n )\n "
taskInfo:
name: create-tmp-metrics-table
bigquery-query-job-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job-3
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
location:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--bigquery-create-dataset-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-dataset_id
pipelinechannel--bigquery-create-dataset-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-project_id
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n CREATE TABLE `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-dataset_id']}}.evaluated_examples`\
\ (\n {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\
\ STRING,\n {{$.inputs.parameters['pipelinechannel--time_column']}}\
\ TIMESTAMP,\n predicted_on_{{$.inputs.parameters['pipelinechannel--time_column']}}\
\ TIMESTAMP,\n {{$.inputs.parameters['pipelinechannel--target_column']}}\
\ FLOAT64,\n predicted_{{$.inputs.parameters['pipelinechannel--target_column']}}\
\ STRUCT<value FLOAT64>\n )\n "
taskInfo:
name: create-evaluated-examples-table
bigquery-query-job-4:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job-4
dependentTasks:
- build-job-configuration-query-5
- for-loop-3
- table-to-uri
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query-5
location:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--table-to-uri-uri:
taskOutputParameter:
outputParameterKey: uri
producerTask: table-to-uri
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n SELECT\n SUM(MAE * prediction_count) /\
\ SUM(prediction_count) AS MAE,\n SQRT(SUM(MSE * prediction_count)\
\ / SUM(prediction_count)) AS RMSE,\n SUM(MAPE * prediction_count)\
\ / SUM(prediction_count) AS MAPE,\n FROM `{{$.inputs.parameters['pipelinechannel--table-to-uri-uri']}}`\n\
\ "
taskInfo:
name: create-backtest-table
bigquery-query-job-5:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job-5
dependentTasks:
- build-job-configuration-query-6
- for-loop-3
- table-to-uri-2
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query-6
location:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--table-to-uri-2-uri:
taskOutputParameter:
outputParameterKey: uri
producerTask: table-to-uri-2
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: SELECT * FROM `{{$.inputs.parameters['pipelinechannel--table-to-uri-2-uri']}}`
taskInfo:
name: export-evaluated-examples-table
build-job-configuration-query:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}'
pipelinechannel--bigquery-create-dataset-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-dataset_id
pipelinechannel--bigquery-create-dataset-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-project_id
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}'
table_id:
runtimeValue:
constant: windows
taskInfo:
name: build-job-configuration-query
build-job-configuration-query-5:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query-5
dependentTasks:
- cond
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}'
pipelinechannel--bigquery-create-dataset-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-dataset_id
pipelinechannel--bigquery-create-dataset-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-project_id
pipelinechannel--cond-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: cond
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}'
table_id:
runtimeValue:
constant: final_metrics
write_disposition:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--cond-Output'']}}'
taskInfo:
name: build-job-configuration-query-5
build-job-configuration-query-6:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query-6
dependentTasks:
- cond
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-2-dataset_id'']}}'
pipelinechannel--bigquery-create-dataset-2-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-dataset_id
pipelinechannel--bigquery-create-dataset-2-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-project_id
pipelinechannel--cond-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: cond
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-2-project_id'']}}'
table_id:
runtimeValue:
constant: evaluated_examples
write_disposition:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--cond-Output'']}}'
taskInfo:
name: build-job-configuration-query-6
build-serialized-query-parameters:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-serialized-query-parameters
inputs:
parameters:
forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
forecast_horizon_off_by_one:
runtimeValue:
constant: true
splits:
runtimeValue:
constant:
- TEST
taskInfo:
name: build-serialized-query-parameters
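# cond maps the pipelinechannel--override_destination flag to a BigQuery write disposition:
# WRITE_TRUNCATE when true, WRITE_EMPTY otherwise.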
cond:
cachingOptions:
enableCache: true
componentRef:
name: comp-cond
inputs:
parameters:
false_str:
runtimeValue:
constant: WRITE_EMPTY
predicate:
componentInputParameter: pipelinechannel--override_destination
true_str:
runtimeValue:
constant: WRITE_TRUNCATE
taskInfo:
name: cond
create-metrics-artifact:
cachingOptions:
enableCache: true
componentRef:
name: comp-create-metrics-artifact
dependentTasks:
- bigquery-list-rows-2
inputs:
parameters:
metrics_rows:
taskOutputParameter:
outputParameterKey: Output
producerTask: bigquery-list-rows-2
taskInfo:
name: create-metrics-artifact
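# for-loop-3 fans out over the rows returned by bigquery-list-rows (one evaluation window per row),
# running at most 50 iterations in parallel (iteratorPolicy.parallelismLimit).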
for-loop-3:
componentRef:
name: comp-for-loop-3
dependentTasks:
- bigquery-list-rows
- table-to-uri
- table-to-uri-2
inputs:
parameters:
pipelinechannel--bigquery-create-dataset-2-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-dataset_id
pipelinechannel--bigquery-create-dataset-2-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-project_id
pipelinechannel--bigquery-create-dataset-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-dataset_id
pipelinechannel--bigquery-create-dataset-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-project_id
pipelinechannel--bigquery-list-rows-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: bigquery-list-rows
pipelinechannel--data_granularity_unit:
componentInputParameter: pipelinechannel--data_granularity_unit
pipelinechannel--forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
pipelinechannel--get-fte-suffix-Output:
componentInputParameter: pipelinechannel--get-fte-suffix-Output
pipelinechannel--get-table-location-Output:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--max_order:
componentInputParameter: pipelinechannel--max_order
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--run_evaluation:
componentInputParameter: pipelinechannel--run_evaluation
pipelinechannel--table-to-uri-2-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: table-to-uri-2
pipelinechannel--table-to-uri-2-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: table-to-uri-2
pipelinechannel--table-to-uri-2-table_id:
taskOutputParameter:
outputParameterKey: table_id
producerTask: table-to-uri-2
pipelinechannel--table-to-uri-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: table-to-uri
pipelinechannel--table-to-uri-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: table-to-uri
pipelinechannel--table-to-uri-table_id:
taskOutputParameter:
outputParameterKey: table_id
producerTask: table-to-uri
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
iteratorPolicy:
parallelismLimit: 50
parameterIterator:
itemInput: pipelinechannel--bigquery-list-rows-Output-loop-item
items:
inputParameter: pipelinechannel--bigquery-list-rows-Output
taskInfo:
name: for-loop-3
table-to-uri:
cachingOptions:
enableCache: true
componentRef:
name: comp-table-to-uri
dependentTasks:
- bigquery-query-job-2
inputs:
artifacts:
table:
taskOutputArtifact:
outputArtifactKey: destination_table
producerTask: bigquery-query-job-2
taskInfo:
name: table-to-uri
table-to-uri-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-table-to-uri-2
dependentTasks:
- bigquery-query-job-3
inputs:
artifacts:
table:
taskOutputArtifact:
outputArtifactKey: destination_table
producerTask: bigquery-query-job-3
taskInfo:
name: table-to-uri-2
inputDefinitions:
parameters:
pipelinechannel--bigquery-create-dataset-2-dataset_id:
parameterType: STRING
pipelinechannel--bigquery-create-dataset-2-project_id:
parameterType: STRING
pipelinechannel--bigquery-create-dataset-dataset_id:
parameterType: STRING
pipelinechannel--bigquery-create-dataset-project_id:
parameterType: STRING
pipelinechannel--data_granularity_unit:
parameterType: STRING
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--forecast_horizon:
parameterType: NUMBER_INTEGER
pipelinechannel--get-fte-suffix-Output:
parameterType: STRING
pipelinechannel--get-table-location-Output:
parameterType: STRING
pipelinechannel--max_order:
parameterType: NUMBER_INTEGER
pipelinechannel--override_destination:
parameterType: BOOLEAN
pipelinechannel--project:
parameterType: STRING
pipelinechannel--run_evaluation:
parameterType: BOOLEAN
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--time_column:
parameterType: STRING
pipelinechannel--time_series_identifier_column:
parameterType: STRING
outputDefinitions:
artifacts:
create-metrics-artifact-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
comp-create-metrics-artifact:
executorLabel: exec-create-metrics-artifact
inputDefinitions:
parameters:
metrics_rows:
parameterType: LIST
outputDefinitions:
artifacts:
evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
comp-exit-handler-1:
dag:
outputs:
artifacts:
create-metrics-artifact-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: create-metrics-artifact-evaluation_metrics
producerSubtask: condition-2
tasks:
bigquery-create-dataset:
cachingOptions: {}
componentRef:
name: comp-bigquery-create-dataset
dependentTasks:
- get-table-location
- validate-inputs
inputs:
parameters:
dataset:
runtimeValue:
constant: tmp_{{$.pipeline_job_uuid}}
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: create-tmp-dataset
bigquery-create-dataset-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-create-dataset-2
dependentTasks:
- get-table-location
- maybe-replace-with-default
- validate-inputs
inputs:
parameters:
dataset:
taskOutputParameter:
outputParameterKey: Output
producerTask: maybe-replace-with-default
exists_ok:
runtimeValue:
constant: true
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: create-export-dataset
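# bigquery-create-model-job (task name: create-serving-model) trains the serving BQML ARIMA_PLUS
# model from the fte_time_series_output_<suffix> table across the TRAIN, VALIDATE, and TEST splits,
# using the query parameters built by build-serialized-query-parameters-3.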
bigquery-create-model-job:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-create-model-job
dependentTasks:
- bigquery-create-dataset-2
- build-serialized-query-parameters-3
- get-fte-suffix
- get-table-location
inputs:
parameters:
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
pipelinechannel--bigquery-create-dataset-2-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset-2
pipelinechannel--bigquery-create-dataset-2-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset-2
pipelinechannel--get-fte-suffix-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-fte-suffix
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n CREATE MODEL `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-dataset_id']}}.model_{{$.pipeline_job_uuid}}`\n\
\ OPTIONS (\n model_type = 'ARIMA_PLUS',\n \
\ time_series_timestamp_col = '{{$.inputs.parameters['pipelinechannel--time_column']}}',\n\
\ time_series_id_col = '{{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}',\n\
\ time_series_data_col = '{{$.inputs.parameters['pipelinechannel--target_column']}}',\n\
\ horizon = @forecast_horizon,\n auto_arima\
\ = True,\n auto_arima_max_order = @max_order,\n \
\ data_frequency = @data_granularity_unit,\n holiday_region\
\ = 'GLOBAL',\n clean_spikes_and_dips = True,\n \
\ adjust_step_changes = True,\n decompose_time_series\
\ = True\n ) AS\n SELECT\n {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}},\n\
\ {{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ {{$.inputs.parameters['pipelinechannel--target_column']}},\n\
\ FROM `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-dataset_id']}}.fte_time_series_output_{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}`\n\
\ WHERE\n UPPER(split__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}})\
\ IN UNNEST(@splits)\n AND TIMESTAMP({{$.inputs.parameters['pipelinechannel--time_column']}})\
\ < @start_time\n "
query_parameters:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-serialized-query-parameters-3
taskInfo:
name: create-serving-model
build-serialized-query-parameters-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-serialized-query-parameters-3
inputs:
parameters:
data_granularity_unit:
componentInputParameter: pipelinechannel--data_granularity_unit
forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
max_order:
componentInputParameter: pipelinechannel--max_order
splits:
runtimeValue:
constant:
- TRAIN
- VALIDATE
- TEST
taskInfo:
name: build-serialized-query-parameters-3
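# condition-2 wraps the evaluation sub-DAG (windowed backtesting, metrics aggregation, and
# evaluated-examples export) and only triggers when pipelinechannel--run_evaluation is true
# (see its triggerPolicy below).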
condition-2:
componentRef:
name: comp-condition-2
dependentTasks:
- bigquery-create-dataset
- bigquery-create-dataset-2
- get-fte-suffix
- get-table-location
inputs:
parameters:
pipelinechannel--bigquery-create-dataset-2-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset-2
pipelinechannel--bigquery-create-dataset-2-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset-2
pipelinechannel--bigquery-create-dataset-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset
pipelinechannel--bigquery-create-dataset-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset
pipelinechannel--data_granularity_unit:
componentInputParameter: pipelinechannel--data_granularity_unit
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
pipelinechannel--get-fte-suffix-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-fte-suffix
pipelinechannel--get-table-location-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
pipelinechannel--max_order:
componentInputParameter: pipelinechannel--max_order
pipelinechannel--override_destination:
componentInputParameter: pipelinechannel--override_destination
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--run_evaluation:
componentInputParameter: pipelinechannel--run_evaluation
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
taskInfo:
name: run-evaluation
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--run_evaluation']
== true
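# feature-transform-engine stages the input data in BigQuery and produces the
# fte_time_series_output_<suffix> table consumed by the CREATE MODEL queries; windowing is
# disabled at this stage (forecasting_apply_windowing: false, context window 0).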
feature-transform-engine:
cachingOptions:
enableCache: true
componentRef:
name: comp-feature-transform-engine
dependentTasks:
- bigquery-create-dataset-2
- wrapped-in-list
inputs:
parameters:
autodetect_csv_schema:
runtimeValue:
constant: true
bigquery_staging_full_dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-2-project_id'']}}.{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-2-dataset_id'']}}'
data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
data_source_csv_filenames:
componentInputParameter: pipelinechannel--data_source_csv_filenames
forecasting_apply_windowing:
runtimeValue:
constant: false
forecasting_context_window:
runtimeValue:
constant: 0.0
forecasting_forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
forecasting_predefined_window_column:
componentInputParameter: pipelinechannel--window_column
forecasting_time_column:
componentInputParameter: pipelinechannel--time_column
forecasting_time_series_identifier_columns:
taskOutputParameter:
outputParameterKey: Output
producerTask: wrapped-in-list
forecasting_window_max_count:
componentInputParameter: pipelinechannel--window_max_count
forecasting_window_stride_length:
componentInputParameter: pipelinechannel--window_stride_length
location:
componentInputParameter: pipelinechannel--location
pipelinechannel--bigquery-create-dataset-2-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset-2
pipelinechannel--bigquery-create-dataset-2-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset-2
predefined_split_key:
componentInputParameter: pipelinechannel--predefined_split_key
prediction_type:
runtimeValue:
constant: time_series
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
target_column:
componentInputParameter: pipelinechannel--target_column
test_fraction:
componentInputParameter: pipelinechannel--test_fraction
tf_auto_transform_features:
runtimeValue:
constant: {}
timestamp_split_key:
componentInputParameter: pipelinechannel--timestamp_split_key
training_fraction:
componentInputParameter: pipelinechannel--training_fraction
validation_fraction:
componentInputParameter: pipelinechannel--validation_fraction
taskInfo:
name: feature-transform-engine
get-fte-suffix:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-fte-suffix
dependentTasks:
- bigquery-create-dataset-2
- feature-transform-engine
inputs:
parameters:
bigquery_staging_full_dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-2-project_id'']}}.{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-2-dataset_id'']}}'
fte_table:
runtimeValue:
constant: fte_time_series_output
location:
componentInputParameter: pipelinechannel--location
pipelinechannel--bigquery-create-dataset-2-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset-2
pipelinechannel--bigquery-create-dataset-2-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset-2
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: get-fte-suffix
get-table-location:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-table-location
inputs:
parameters:
default_location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
table:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
taskInfo:
name: get-table-location
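# maybe-replace-with-default picks the export dataset name: the user-supplied
# bigquery_destination_uri when set, otherwise export_{{$.pipeline_job_uuid}}.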
maybe-replace-with-default:
cachingOptions:
enableCache: true
componentRef:
name: comp-maybe-replace-with-default
inputs:
parameters:
default:
runtimeValue:
constant: export_{{$.pipeline_job_uuid}}
value:
componentInputParameter: pipelinechannel--bigquery_destination_uri
taskInfo:
name: maybe-replace-with-default
validate-inputs:
cachingOptions:
enableCache: true
componentRef:
name: comp-validate-inputs
inputs:
parameters:
bigquery_destination_uri:
componentInputParameter: pipelinechannel--bigquery_destination_uri
data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
data_source_csv_filenames:
componentInputParameter: pipelinechannel--data_source_csv_filenames
predefined_split_key:
componentInputParameter: pipelinechannel--predefined_split_key
target_column:
componentInputParameter: pipelinechannel--target_column
test_fraction:
componentInputParameter: pipelinechannel--test_fraction
time_column:
componentInputParameter: pipelinechannel--time_column
time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
timestamp_split_key:
componentInputParameter: pipelinechannel--timestamp_split_key
training_fraction:
componentInputParameter: pipelinechannel--training_fraction
validation_fraction:
componentInputParameter: pipelinechannel--validation_fraction
window_column:
componentInputParameter: pipelinechannel--window_column
window_max_count:
componentInputParameter: pipelinechannel--window_max_count
window_stride_length:
componentInputParameter: pipelinechannel--window_stride_length
taskInfo:
name: validate-inputs
wrapped-in-list:
cachingOptions:
enableCache: true
componentRef:
name: comp-wrapped-in-list
inputs:
parameters:
value:
componentInputParameter: pipelinechannel--time_series_identifier_column
taskInfo:
name: wrapped-in-list
inputDefinitions:
parameters:
pipelinechannel--bigquery_destination_uri:
parameterType: STRING
pipelinechannel--data_granularity_unit:
parameterType: STRING
pipelinechannel--data_source_bigquery_table_path:
parameterType: STRING
pipelinechannel--data_source_csv_filenames:
parameterType: STRING
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--forecast_horizon:
parameterType: NUMBER_INTEGER
pipelinechannel--location:
parameterType: STRING
pipelinechannel--max_order:
parameterType: NUMBER_INTEGER
pipelinechannel--override_destination:
parameterType: BOOLEAN
pipelinechannel--predefined_split_key:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--run_evaluation:
parameterType: BOOLEAN
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--test_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--time_column:
parameterType: STRING
pipelinechannel--time_series_identifier_column:
parameterType: STRING
pipelinechannel--timestamp_split_key:
parameterType: STRING
pipelinechannel--training_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--validation_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--window_column:
parameterType: STRING
pipelinechannel--window_max_count:
parameterType: NUMBER_INTEGER
pipelinechannel--window_stride_length:
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
create-metrics-artifact-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
comp-feature-transform-engine:
executorLabel: exec-feature-transform-engine
inputDefinitions:
parameters:
autodetect_csv_schema:
defaultValue: false
description: 'If True, infers the column types
when importing CSVs into BigQuery.'
isOptional: true
parameterType: BOOLEAN
bigquery_staging_full_dataset_id:
defaultValue: ''
description: Dataset in "projectId.datasetId" format for storing intermediate-FTE
BigQuery tables. If the specified dataset does not exist in BigQuery,
FTE will create the dataset. If no bigquery_staging_full_dataset_id is
specified, all intermediate tables will be stored in a dataset created
under the provided project in the input data source's location during
FTE execution called "vertex_feature_transform_engine_staging_{location.replace('-',
'_')}". All tables generated by FTE will have a 30 day TTL.
isOptional: true
parameterType: STRING
data_source_bigquery_table_path:
defaultValue: ''
description: BigQuery input data source to run feature transform on.
isOptional: true
parameterType: STRING
data_source_csv_filenames:
defaultValue: ''
description: CSV input data source to run feature transform on.
isOptional: true
parameterType: STRING
dataflow_disk_size_gb:
defaultValue: 40.0
description: The disk size, in gigabytes, to use on each Dataflow worker
instance. If not set, defaults to 40.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-16
description: The machine type used for Dataflow jobs. If not set, defaults
to n1-standard-16.
isOptional: true
parameterType: STRING
dataflow_max_num_workers:
defaultValue: 25.0
description: The number of workers to run the Dataflow job. If not set,
defaults to 25.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
description: Custom service account to run Dataflow jobs.
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
description: 'Dataflow''s fully qualified subnetwork name; when empty, the
default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications'
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: Specifies whether Dataflow workers use public IP addresses.
isOptional: true
parameterType: BOOLEAN
dataset_level_custom_transformation_definitions:
defaultValue: []
description: 'List of dataset-level custom transformation definitions. Custom,
bring-your-own dataset-level transform functions, where users can define
and import their own transform function and use it with FTE''s built-in
transformations. Using custom transformations is an experimental feature
and it is currently not supported during batch prediction.
[ { "transformation": "ConcatCols", "module_path": "/path/to/custom_transform_fn_dlt.py",
"function_name": "concat_cols" } ] Using custom transform function together
with FTE''s built-in transformations: .. code-block:: python [ { "transformation":
"Join", "right_table_uri": "bq://test-project.dataset_test.table", "join_keys":
[["join_key_col", "join_key_col"]] },{ "transformation": "ConcatCols",
"cols": ["feature_1", "feature_2"], "output_col": "feature_1_2" } ]'
isOptional: true
parameterType: LIST
dataset_level_transformations:
defaultValue: []
description: "List of dataset-level transformations.\n[ { \"transformation\"\
: \"Join\", \"right_table_uri\": \"bq://test-project.dataset_test.table\"\
, \"join_keys\": [[\"join_key_col\", \"join_key_col\"]] }, ... ] Additional\
\ information about FTE's currently supported built-in\n transformations:\n\
\ Join: Joins features from right_table_uri. For each join key, the\
\ left table keys will be included and the right table keys will be dropped.\n\
\ Example: .. code-block:: python { \"transformation\": \"Join\"\
, \"right_table_uri\": \"bq://test-project.dataset_test.table\", \"join_keys\"\
: [[\"join_key_col\", \"join_key_col\"]] }\n Arguments:\n \
\ right_table_uri: Right table BigQuery uri to join with input_full_table_id.\n\
\ join_keys: Features to join on. For each nested list, the\
\ first element is a left table column and the second is its corresponding\
\ right table column.\n TimeAggregate: Creates a new feature composed\
\ of values of an existing feature from a fixed time period ago or in\
\ the future.\n Ex: A feature for sales by store 1 year ago.\n \
\ Example: .. code-block:: python { \"transformation\": \"TimeAggregate\"\
, \"time_difference\": 40, \"time_difference_units\": \"DAY\", \"time_series_identifier_columns\"\
: [\"store_id\"], \"time_column\": \"time_col\", \"time_difference_target_column\"\
: \"target_col\", \"output_column\": \"output_col\" }\n Arguments:\n\
\ time_difference: Number of time_difference_units to look\
\ back or into the future on our time_difference_target_column.\n \
\ time_difference_units: Units of time_difference to look back\
\ or into the future on our time_difference_target_column. Must be one\
\ of * 'DAY' * 'WEEK' (Equivalent to 7 DAYs) * 'MONTH' * 'QUARTER' * 'YEAR'\n\
\ time_series_identifier_columns: Names of the time series\
\ identifier columns.\n time_column: Name of the time column.\n\
\ time_difference_target_column: Column we wish to get the\
\ value of time_difference time_difference_units in the past or future.\n\
\ output_column: Name of our new time aggregate feature.\n\
\ is_future: Whether we wish to look forward in time. Defaults\
\ to False. PartitionByMax/PartitionByMin/PartitionByAvg/PartitionBySum:\
\ Performs a partition by reduce operation (one of max, min, avg, or sum)\
\ with a fixed historic time period. Ex: Getting avg sales (the reduce\
\ column) for each store (partition_by_column) over the previous 5 days\
\ (time_column, time_ago_units, and time_ago).\n Example: .. code-block::\
\ python { \"transformation\": \"PartitionByMax\", \"reduce_column\"\
: \"sell_price\", \"partition_by_columns\": [\"store_id\", \"state_id\"\
], \"time_column\": \"date\", \"time_ago\": 1, \"time_ago_units\": \"\
WEEK\", \"output_column\": \"partition_by_reduce_max_output\" }\n \
\ Arguments:\n reduce_column: Column to apply the reduce\
\ operation on. Reduce operations include the\n following:\
\ Max, Min, Avg, Sum.\n partition_by_columns: List of columns\
\ to partition by.\n time_column: Time column for the partition\
\ by operation's window function.\n time_ago: Number of time_ago_units\
\ to look back on our target_column, starting from time_column (inclusive).\n\
\ time_ago_units: Units of time_ago to look back on our target_column.\
\ Must be one of * 'DAY' * 'WEEK'\n output_column: Name of\
\ our output feature."
isOptional: true
parameterType: LIST
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
feature_selection_algorithm:
defaultValue: AMI
description: "The algorithm of feature selection. One of \"AMI\", \"CMIM\"\
, \"JMIM\", \"MRMR\", default to be \"AMI\". The algorithms available\
\ are: AMI(Adjusted Mutual Information):\nReference: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.adjusted_mutual_info_score.html\
\ Arrays are not yet supported in this algorithm. CMIM(Conditional Mutual\
\ Information Maximization): Reference paper: Mohamed Bennasar, Yulia\
\ Hicks, Rossitza Setchi, \u201CFeature selection using Joint Mutual Information\
\ Maximisation,\u201D Expert Systems with Applications, vol. 42, issue\
\ 22, 1 December 2015, Pages 8520-8532. JMIM(Joint Mutual Information\
\ Maximization):\nReference:\n paper: Mohamed Bennasar, Yulia Hicks, Rossitza\
\ Setchi, \u201CFeature selection using Joint Mutual Information Maximisation,\u201D\
\ Expert Systems with Applications, vol. 42, issue 22, 1 December 2015,\
\ Pages 8520-8532. MRMR(MIQ Minimum-redundancy Maximum-relevance): Reference\
\ paper: Hanchuan Peng, Fuhui Long, and Chris Ding. \"Feature selection\
\ based on mutual information criteria of max-dependency, max-relevance,\
\ and min-redundancy.\" IEEE Transactions on pattern analysis and machine\
\ intelligence 27, no.\n 8: 1226-1238."
isOptional: true
parameterType: STRING
feature_selection_execution_engine:
defaultValue: dataflow
description: Execution engine to run feature selection; value can be dataflow
or bigquery.
isOptional: true
parameterType: STRING
forecasting_apply_windowing:
defaultValue: true
description: Whether to apply window strategy.
isOptional: true
parameterType: BOOLEAN
forecasting_available_at_forecast_columns:
defaultValue: []
description: Forecasting available at forecast columns.
isOptional: true
parameterType: LIST
forecasting_context_window:
defaultValue: -1.0
description: Forecasting context window.
isOptional: true
parameterType: NUMBER_INTEGER
forecasting_forecast_horizon:
defaultValue: -1.0
description: Forecasting horizon.
isOptional: true
parameterType: NUMBER_INTEGER
forecasting_holiday_regions:
defaultValue: []
description: 'The geographical region based on which the holiday effect
is applied in modeling by adding holiday categorical array feature that
include all holidays matching the date. This option only allowed when
data granularity is day. By default, holiday effect modeling is disabled.
To turn it on, specify the holiday region using this option.
Top level: * ''GLOBAL''
Second level: continental regions: * ''NA'': North America
* ''JAPAC'': Japan and Asia Pacific
* ''EMEA'': Europe, the Middle East and Africa
* ''LAC'': Latin America and the Caribbean
Third level: countries from ISO 3166-1 Country codes.
Valid regions: * ''GLOBAL'' * ''NA'' * ''JAPAC'' * ''EMEA'' * ''LAC''
* ''AE''
* ''AR'' * ''AT'' * ''AU'' * ''BE'' * ''BR'' * ''CA'' * ''CH'' * ''CL''
* ''CN'' * ''CO''
* ''CZ'' * ''DE'' * ''DK'' * ''DZ'' * ''EC'' * ''EE'' * ''EG'' * ''ES''
* ''FI'' * ''FR''
* ''GB'' * ''GR'' * ''HK'' * ''HU'' * ''ID'' * ''IE'' * ''IL'' * ''IN''
* ''IR'' * ''IT''
* ''JP'' * ''KR'' * ''LV'' * ''MA'' * ''MX'' * ''MY'' * ''NG'' * ''NL''
* ''NO'' * ''NZ''
* ''PE'' * ''PH'' * ''PK'' * ''PL'' * ''PT'' * ''RO'' * ''RS'' * ''RU''
* ''SA'' * ''SE''
* ''SG'' * ''SI'' * ''SK'' * ''TH'' * ''TR'' * ''TW'' * ''UA'' * ''US''
* ''VE'' * ''VN''
* ''ZA'''
isOptional: true
parameterType: LIST
forecasting_predefined_window_column:
defaultValue: ''
description: Forecasting predefined window column.
isOptional: true
parameterType: STRING
forecasting_time_column:
defaultValue: ''
description: Forecasting time column.
isOptional: true
parameterType: STRING
forecasting_time_series_attribute_columns:
defaultValue: []
description: Forecasting time series attribute columns.
isOptional: true
parameterType: LIST
forecasting_time_series_identifier_column:
description: '[Deprecated] A forecasting time series identifier column.
Raises an exception if used - use the "time_series_identifier_column"
field instead.'
isOptional: true
parameterType: STRING
forecasting_time_series_identifier_columns:
defaultValue: []
description: The list of forecasting time series identifier columns.
isOptional: true
parameterType: LIST
forecasting_unavailable_at_forecast_columns:
defaultValue: []
description: Forecasting unavailable at forecast columns.
isOptional: true
parameterType: LIST
forecasting_window_max_count:
defaultValue: -1.0
description: Forecasting window max count.
isOptional: true
parameterType: NUMBER_INTEGER
forecasting_window_stride_length:
defaultValue: -1.0
description: Forecasting window stride length.
isOptional: true
parameterType: NUMBER_INTEGER
group_columns:
isOptional: true
parameterType: LIST
group_temporal_total_weight:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_DOUBLE
group_total_weight:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_DOUBLE
legacy_transformations_path:
defaultValue: ''
isOptional: true
parameterType: STRING
location:
description: Location for the created GCP services.
parameterType: STRING
materialized_examples_format:
defaultValue: tfrecords_gzip
description: The format to use for the materialized examples. Should be
either 'tfrecords_gzip' (default) or 'parquet'.
isOptional: true
parameterType: STRING
max_selected_features:
defaultValue: 1000.0
description: Maximum number of features to select. If specified, the transform
config will be purged by only using the selected features that ranked
top in the feature ranking, which has the ranking value for all supported
features. If the number of input features is smaller than max_selected_features
specified, we will still run the feature selection process and generate
the feature ranking, but no features will be excluded. The value will be
set to 1000 by default if run_feature_selection is enabled.
isOptional: true
parameterType: NUMBER_INTEGER
model_type:
description: 'Model type, which we wish to engineer features for. Can be
one of: neural_network, boosted_trees, l2l, seq2seq, tft, or tide. Defaults
to the empty value, `None`.'
isOptional: true
parameterType: STRING
multimodal_image_columns:
defaultValue: []
description: List of multimodal image columns. Defaults to an empty list.
isOptional: true
parameterType: LIST
multimodal_tabular_columns:
defaultValue: []
description: List of multimodal tabular columns. Defaults to an empty list
isOptional: true
parameterType: LIST
multimodal_text_columns:
defaultValue: []
description: List of multimodal text columns. Defaults to an empty list
isOptional: true
parameterType: LIST
multimodal_timeseries_columns:
defaultValue: []
description: List of multimodal timeseries columns. Defaults to an empty
list
isOptional: true
parameterType: LIST
predefined_split_key:
defaultValue: ''
description: Predefined split key.
isOptional: true
parameterType: STRING
prediction_type:
defaultValue: ''
description: Model prediction type. One of "classification", "regression",
"time_series".
isOptional: true
parameterType: STRING
project:
description: Project to run feature transform engine.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
run_distill:
defaultValue: false
description: (deprecated) Whether the distillation should be applied to
the training.
isOptional: true
parameterType: BOOLEAN
run_feature_selection:
defaultValue: false
description: Whether the feature selection should be applied to the dataset.
isOptional: true
parameterType: BOOLEAN
stats_gen_execution_engine:
defaultValue: dataflow
description: 'Execution engine to perform statistics generation. Can be
one of: "dataflow" (by default) or "bigquery". Using "bigquery" as the
execution engine is experimental.'
isOptional: true
parameterType: STRING
stratified_split_key:
defaultValue: ''
description: Stratified split key.
isOptional: true
parameterType: STRING
target_column:
defaultValue: ''
description: Target column of input data.
isOptional: true
parameterType: STRING
temporal_total_weight:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_DOUBLE
test_fraction:
defaultValue: -1.0
description: Fraction of input data for testing.
isOptional: true
parameterType: NUMBER_DOUBLE
tf_auto_transform_features:
defaultValue: {}
description: 'Dict mapping auto and/or type-resolutions to TF transform
features. FTE will automatically configure a set of built-in transformations
for each feature based on its data statistics. If users do not want auto
type resolution, but want the set of transformations for a given type
to be automatically generated, they may specify pre-resolved transformations
types. The following type hint dict keys are supported: * ''auto'' * ''categorical''
* ''numeric'' * ''text'' * ''timestamp'' Example: `{ "auto": ["feature1"],
"categorical": ["feature2", "feature3"], }`. Note that the target and
weight column may not be included as an auto transformation unless users
are running forecasting.'
isOptional: true
parameterType: STRUCT
tf_custom_transformation_definitions:
defaultValue: []
description: 'List of TensorFlow-based custom transformation definitions. Custom,
bring-your-own transform functions, where users can define and import
their own transform function and use it with FTE''s built-in transformations.
`[ { "transformation": "PlusOne", "module_path": "gs://bucket/custom_transform_fn.py",
"function_name": "plus_one_transform" }, { "transformation": "MultiplyTwo",
"module_path": "gs://bucket/custom_transform_fn.py", "function_name":
"multiply_two_transform" } ] Using custom transform function together
with FTE''s built-in transformations: .. code-block:: python [ { "transformation":
"CastToFloat", "input_columns": ["feature_1"], "output_columns": ["feature_1"]
},{ "transformation": "PlusOne", "input_columns": ["feature_1"] "output_columns":
["feature_1_plused_one"] },{ "transformation": "MultiplyTwo", "input_columns":
["feature_1"] "output_columns": ["feature_1_multiplied_two"] } ]'
isOptional: true
parameterType: LIST
tf_transform_execution_engine:
defaultValue: dataflow
description: 'Execution engine to perform row-level TF transformations.
Can be one of: "dataflow" (by default) or "bigquery". Using "bigquery"
as the execution engine is experimental and is for allowlisted customers
only. In addition, executing on "bigquery" only supports auto transformations
(i.e., specified by tf_auto_transform_features) and will raise an error
when tf_custom_transformation_definitions or tf_transformations_path is
set.'
isOptional: true
parameterType: STRING
tf_transformations_path:
defaultValue: ''
description: "Path to TensorFlow-based transformation configuration. Path\
\ to a JSON file used to specify FTE's TF transformation configurations.\
\ In the following, we provide some sample transform configurations to\
\ demonstrate FTE's capabilities. All transformations on input columns\
\ are explicitly specified with FTE's built-in transformations. Chaining\
\ of multiple transformations on a single column is also supported. For\
\ example: .. code-block:: python [ { \"transformation\": \"ZScale\"\
, \"input_columns\": [\"feature_1\"] }, { \"transformation\": \"ZScale\"\
, \"input_columns\": [\"feature_2\"] } ]`. Additional information about\
\ FTE's currently supported built-in\ntransformations:\nDatetime: Extracts\
\ datetime features from a column containing timestamp strings.\n Example:\
\ .. code-block:: python { \"transformation\": \"Datetime\", \"input_columns\"\
: [\"feature_1\"], \"time_format\": \"%Y-%m-%d\" }\n Arguments:\n \
\ input_columns: A list with a single column to perform the datetime\
\ transformation on.\n output_columns: Names of output columns,\
\ one for each datetime_features element.\n time_format: Datetime\
\ format string. Time format is a combination of Date + Time Delimiter\
\ (optional) + Time (optional) directives. Valid date directives are as\
\ follows * '%Y-%m-%d' # 2018-11-30 * '%Y/%m/%d' # 2018/11/30 * '%y-%m-%d'\
\ # 18-11-30 * '%y/%m/%d' # 18/11/30 * '%m-%d-%Y' # 11-30-2018 * '%m/%d/%Y'\
\ # 11/30/2018 * '%m-%d-%y' # 11-30-18 * '%m/%d/%y' # 11/30/18 * '%d-%m-%Y'\
\ # 30-11-2018 * '%d/%m/%Y' # 30/11/2018 * '%d-%B-%Y' # 30-November-2018\
\ * '%d-%m-%y' # 30-11-18 * '%d/%m/%y' # 30/11/18 * '%d-%B-%y' # 30-November-18\
\ * '%d%m%Y' # 30112018 * '%m%d%Y' # 11302018 * '%Y%m%d' # 20181130\
\ Valid time delimiters are as follows * 'T' * ' ' Valid time directives\
\ are as follows * '%H:%M' # 23:59 * '%H:%M:%S' #\n \
\ 23:59:58 * '%H:%M:%S.%f' # 23:59:58[.123456] * '%H:%M:%S.%f%z'\
\ # 23:59:58[.123456]+0000 * '%H:%M:%S%z', # 23:59:58+0000\n \
\ datetime_features: List of datetime features to be extracted. Each entry\
\ must be one of * 'YEAR' * 'MONTH' * 'DAY' * 'DAY_OF_WEEK' * 'DAY_OF_YEAR'\
\ * 'WEEK_OF_YEAR' * 'QUARTER' * 'HOUR' * 'MINUTE' * 'SECOND' Defaults\
\ to ['YEAR', 'MONTH', 'DAY', 'DAY_OF_WEEK', 'DAY_OF_YEAR', 'WEEK_OF_YEAR']\n\
Log: Performs the natural log on a numeric column.\n Example: .. code-block::\
\ python { \"transformation\": \"Log\", \"input_columns\": [\"feature_1\"\
] }\n Arguments:\n input_columns: A list with a single column\
\ to perform the log transformation on.\n output_columns: A list\
\ with a single output column name, corresponding to the output of our\
\ transformation.\nZScale: Performs Z-scale normalization on a numeric\
\ column.\n Example: .. code-block:: python { \"transformation\"\
: \"ZScale\", \"input_columns\": [\"feature_1\"] }\n Arguments:\n \
\ input_columns: A list with a single column to perform the z-scale\
\ transformation on.\n output_columns: A list with a single output\
\ column name, corresponding to the output of our transformation.\nVocabulary:\
\ Converts strings to integers, where each unique string gets a unique\
\ integer representation.\n Example: .. code-block:: python { \"\
transformation\": \"Vocabulary\", \"input_columns\": [\"feature_1\"] }\n\
\ Arguments:\n input_columns: A list with a single column to\
\ perform the vocabulary transformation on.\n output_columns: A\
\ list with a single output column name, corresponding to the output of\
\ our transformation.\n top_k: Number of the most frequent words\
\ in the vocabulary to use for generating dictionary lookup indices. If\
\ not specified, all words in the vocabulary will be used. Defaults to\
\ None.\n frequency_threshold: Limit the vocabulary only to words\
\ whose number of occurrences in the input exceeds frequency_threshold.\
\ If not specified, all words in the vocabulary will be included. If both\
\ top_k and frequency_threshold are specified, a word must satisfy both\
\ conditions to be included. Defaults to None.\nCategorical: Transforms\
\ categorical columns to integer columns.\n Example: .. code-block::\
\ python { \"transformation\": \"Categorical\", \"input_columns\": [\"\
feature_1\"], \"top_k\": 10 }\n Arguments:\n input_columns:\
\ A list with a single column to perform the categorical transformation\
\ on.\n output_columns: A list with a single output column name,\
\ corresponding to the output of our transformation.\n top_k: Number\
\ of the most frequent words in the vocabulary to use for generating dictionary\
\ lookup indices. If not specified, all words in the vocabulary will be\
\ used.\n frequency_threshold: Limit the vocabulary only to words\
\ whose number of occurrences in the input exceeds frequency_threshold.\
\ If not specified, all words in the vocabulary will be included. If both\
\ top_k and frequency_threshold are specified, a word must satisfy both\
\ conditions to be included.\nReduce: Given a column where each entry\
\ is a numeric array, reduces arrays according to our reduce_mode.\n \
\ Example: .. code-block:: python { \"transformation\": \"Reduce\"\
, \"input_columns\": [\"feature_1\"], \"reduce_mode\": \"MEAN\", \"output_columns\"\
: [\"feature_1_mean\"] }\n Arguments:\n input_columns: A list\
\ with a single column to perform the reduce transformation on.\n \
\ output_columns: A list with a single output column name, corresponding\
\ to the output of our transformation.\n reduce_mode: One of *\
\ 'MAX' * 'MIN' * 'MEAN' * 'LAST_K' Defaults to 'MEAN'.\n last_k:\
\ The number of last k elements when 'LAST_K' reduce mode is used. Defaults\
\ to 1.\nSplitString: Given a column of strings, splits strings into token\
\ arrays.\n Example: .. code-block:: python { \"transformation\"\
: \"SplitString\", \"input_columns\": [\"feature_1\"], \"separator\":\
\ \"$\" }\n Arguments:\n input_columns: A list with a single\
\ column to perform the split string transformation on.\n output_columns:\
\ A list with a single output column name, corresponding to the output\
\ of our transformation.\n separator: Separator to split input\
\ string into tokens. Defaults to ' '.\n missing_token: Missing\
\ token to use when no string is included. Defaults to ' _MISSING_ '.\n\
NGram: Given a column of strings, splits strings into token arrays where\
\ each token is an integer.\n Example: .. code-block:: python { \"\
transformation\": \"NGram\", \"input_columns\": [\"feature_1\"], \"min_ngram_size\"\
: 1, \"max_ngram_size\": 2, \"separator\": \" \" }\n Arguments:\n \
\ input_columns: A list with a single column to perform the n-gram\
\ transformation on.\n output_columns: A list with a single output\
\ column name, corresponding to the output of our transformation.\n \
\ min_ngram_size: Minimum n-gram size. Must be a positive number\
\ and <= max_ngram_size. Defaults to 1.\n max_ngram_size: Maximum\
\ n-gram size. Must be a positive number and >= min_ngram_size. Defaults\
\ to 2.\n top_k: Number of the most frequent words in the vocabulary\
\ to use for generating dictionary lookup indices. If not specified, all\
\ words in the vocabulary will be used. Defaults to None.\n frequency_threshold:\
\ Limit the dictionary's vocabulary only to words whose number of occurrences\
\ in the input exceeds frequency_threshold. If not specified, all words\
\ in the vocabulary will be included. If both top_k and frequency_threshold\
\ are specified, a word must satisfy both conditions to be included. Defaults\
\ to None.\n separator: Separator to split input string into tokens.\
\ Defaults to ' '.\n missing_token: Missing token to use when no\
\ string is included. Defaults to ' _MISSING_ '.\nClip: Given a numeric\
\ column, clips elements such that elements < min_value are assigned min_value,\
\ and elements > max_value are assigned max_value.\n Example: .. code-block::\
\ python { \"transformation\": \"Clip\", \"input_columns\": [\"col1\"\
], \"output_columns\": [\"col1_clipped\"], \"min_value\": 1., \"max_value\"\
: 10., }\n Arguments:\n input_columns: A list with a single\
\ column to perform the clip transformation on.\n output_columns:\
\ A list with a single output column name, corresponding to the output\
\ of our transformation.\n min_value: Number where all values below\
\ min_value are set to min_value. If no min_value is provided, min clipping\
\ will not occur. Defaults to None.\n max_value: Number where all\
\ values above max_value are set to max_value If no max_value is provided,\
\ max clipping will not occur. Defaults to None.\nMultiHotEncoding: Performs\
\ multi-hot encoding on a categorical array column.\n Example: ..\
\ code-block:: python { \"transformation\": \"MultiHotEncoding\", \"\
input_columns\": [\"col1\"], } The number of classes is determened by\
\ the largest number included in the input if it is numeric or the total\
\ number of unique values of the input if it is type str. If the input\
\ has type str and an element contains separator tokens, the input
\ will be split at separator indices, and each element of the split
\ list will be considered a separate class. For example,\n Input: \
\ .. code-block:: python [ [\"foo bar\"], # Example 0 [\"foo\",\
\ \"bar\"], # Example 1 [\"foo\"], # Example 2 [\"bar\"], \
\ # Example 3 ] Output (with default separator=\" \"): .. code-block::\
\ python [ [1, 1], # Example 0 [1, 1], # Example 1 [1,\
\ 0], # Example 2 [0, 1], # Example 3 ]\n Arguments:\n\
\ input_columns: A list with a single column to perform the multi-hot-encoding\
\ on.\n output_columns: A list with a single output column name,\
\ corresponding to the output of our transformation.\n top_k: Number\
\ of the most frequent words in the vocabulary to use for generating dictionary\
\ lookup indices. If not specified, all words in the vocabulary will be\
\ used. Defaults to None.\n frequency_threshold: Limit the dictionary's\
\ vocabulary only to words whose number of occurrences in the input exceeds\
\ frequency_threshold. If not specified, all words in the vocabulary will\
\ be included. If both top_k and frequency_threshold are specified, a\
\ word must satisfy both conditions to be included. Defaults to None.\n\
\ separator: Separator to split input string into tokens. Defaults\
\ to ' '.\nMaxAbsScale: Performs maximum absolute scaling on a numeric\
\ column.\n Example: .. code-block:: python { \"transformation\"\
: \"MaxAbsScale\", \"input_columns\": [\"col1\"], \"output_columns\":\
\ [\"col1_max_abs_scaled\"] }\n Arguments:\n input_columns:\
\ A list with a single column to perform max-abs-scale on.\n output_columns:\
\ A list with a single output column name, corresponding to the output\
\ of our transformation.\nCustom: Transformations defined in tf_custom_transformation_definitions\
\ are included here in the TensorFlow-based transformation configuration.\
\ For example, given the following tf_custom_transformation_definitions:\
\ .. code-block:: python [ { \"transformation\": \"PlusX\", \"module_path\"\
: \"gs://bucket/custom_transform_fn.py\", \"function_name\": \"plus_one_transform\"\
\ } ] We can include the following transformation: .. code-block:: python\
\ { \"transformation\": \"PlusX\", \"input_columns\": [\"col1\"], \"\
output_columns\": [\"col1_max_abs_scaled\"] \"x\": 5 } Note that input_columns\
\ must still be included in our arguments and output_columns is optional.\
\ All other arguments are those defined in custom_transform_fn.py, which\
\ includes `\"x\"` in this case. See tf_custom_transformation_definitions\
\ above. legacy_transformations_path (Optional[str]) Deprecated. Prefer\
\ tf_auto_transform_features. Path to a GCS file containing JSON string\
\ for legacy style transformations. Note that legacy_transformations_path\
\ and tf_auto_transform_features cannot both be specified."
isOptional: true
parameterType: STRING
timestamp_split_key:
defaultValue: ''
description: Timestamp split key.
isOptional: true
parameterType: STRING
training_fraction:
defaultValue: -1.0
description: Fraction of input data for training.
isOptional: true
parameterType: NUMBER_DOUBLE
validation_fraction:
defaultValue: -1.0
description: Fraction of input data for validation.
isOptional: true
parameterType: NUMBER_DOUBLE
weight_column:
defaultValue: ''
description: Weight column of input data.
isOptional: true
parameterType: STRING
outputDefinitions:
artifacts:
dataset_stats:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The stats of the dataset.
feature_ranking:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The ranking of features; all features supported in the dataset
will be included. For the "AMI" algorithm, array features won't be available
in the ranking as arrays are not supported yet.
instance_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
materialized_data:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The materialized dataset.
training_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
parameters:
bigquery_downsampled_test_split_uri:
description: BigQuery URI for the downsampled test split to pass to the
batch prediction component during batch explain.
parameterType: STRING
bigquery_test_split_uri:
description: BigQuery URI for the test split to pass to the batch prediction
component during evaluation.
parameterType: STRING
bigquery_train_split_uri:
description: BigQuery URI for the train split to pass to the batch prediction
component during distillation.
parameterType: STRING
bigquery_validation_split_uri:
description: BigQuery URI for the validation split to pass to the batch
prediction component during distillation.
parameterType: STRING
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
split_example_counts:
description: JSON string of data split example counts for train, validate,
and test splits.
parameterType: STRING
comp-for-loop-3:
dag:
tasks:
build-job-configuration-query-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query-2
dependentTasks:
- get-window-query-priority
inputs:
parameters:
pipelinechannel--get-window-query-priority-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-window-query-priority
priority:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--get-window-query-priority-Output'']}}'
taskInfo:
name: build-job-configuration-query-2
build-job-configuration-query-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query-3
dependentTasks:
- get-window-query-priority
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-dataset_id'']}}'
pipelinechannel--get-window-query-priority-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-window-query-priority
pipelinechannel--table-to-uri-dataset_id:
componentInputParameter: pipelinechannel--table-to-uri-dataset_id
pipelinechannel--table-to-uri-project_id:
componentInputParameter: pipelinechannel--table-to-uri-project_id
pipelinechannel--table-to-uri-table_id:
componentInputParameter: pipelinechannel--table-to-uri-table_id
priority:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--get-window-query-priority-Output'']}}'
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-project_id'']}}'
table_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-table_id'']}}'
write_disposition:
runtimeValue:
constant: WRITE_APPEND
taskInfo:
name: build-job-configuration-query-3
build-job-configuration-query-4:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query-4
dependentTasks:
- get-window-query-priority
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-2-dataset_id'']}}'
pipelinechannel--get-window-query-priority-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-window-query-priority
pipelinechannel--table-to-uri-2-dataset_id:
componentInputParameter: pipelinechannel--table-to-uri-2-dataset_id
pipelinechannel--table-to-uri-2-project_id:
componentInputParameter: pipelinechannel--table-to-uri-2-project_id
pipelinechannel--table-to-uri-2-table_id:
componentInputParameter: pipelinechannel--table-to-uri-2-table_id
priority:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--get-window-query-priority-Output'']}}'
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-2-project_id'']}}'
table_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-2-table_id'']}}'
write_disposition:
runtimeValue:
constant: WRITE_APPEND
taskInfo:
name: build-job-configuration-query-4
build-serialized-query-parameters-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-serialized-query-parameters-2
inputs:
parameters:
data_granularity_unit:
componentInputParameter: pipelinechannel--data_granularity_unit
forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
max_order:
componentInputParameter: pipelinechannel--max_order
splits:
runtimeValue:
constant:
- TRAIN
- VALIDATE
- TEST
window:
componentInputParameter: pipelinechannel--bigquery-list-rows-Output-loop-item
taskInfo:
name: build-serialized-query-parameters-2
get-value:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-value
inputs:
parameters:
d:
componentInputParameter: pipelinechannel--bigquery-list-rows-Output-loop-item
key:
runtimeValue:
constant: window_number
taskInfo:
name: get_window_number
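# get-window-query-priority picks the BigQuery query priority used for this window's jobs;
# max_interactive is set to 50, presumably capping how many windows run as interactive-priority
# queries before falling back to batch priority.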
get-window-query-priority:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-window-query-priority
inputs:
parameters:
max_interactive:
runtimeValue:
constant: 50.0
window:
componentInputParameter: pipelinechannel--bigquery-list-rows-Output-loop-item
taskInfo:
name: get-window-query-priority
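# query-with-retry (task name: create-eval-model) trains a per-window ARIMA_PLUS evaluation model,
# model_<window_number>, with training data cut off at @start_time, which is presumably derived
# from the window row by build-serialized-query-parameters-2.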
query-with-retry:
cachingOptions:
enableCache: true
componentRef:
name: comp-query-with-retry
dependentTasks:
- build-job-configuration-query-2
- build-serialized-query-parameters-2
- get-value
inputs:
parameters:
destination_uri:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}.{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}.model_{{$.inputs.parameters[''pipelinechannel--get-value-Output'']}}'
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query-2
location:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--bigquery-create-dataset-2-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-dataset_id
pipelinechannel--bigquery-create-dataset-2-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-project_id
pipelinechannel--bigquery-create-dataset-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-dataset_id
pipelinechannel--bigquery-create-dataset-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-project_id
pipelinechannel--get-fte-suffix-Output:
componentInputParameter: pipelinechannel--get-fte-suffix-Output
pipelinechannel--get-value-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-value
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n CREATE MODEL `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-dataset_id']}}.model_{{$.inputs.parameters['pipelinechannel--get-value-Output']}}`\n\
\ OPTIONS (\n model_type = 'ARIMA_PLUS',\n \
\ time_series_timestamp_col = '{{$.inputs.parameters['pipelinechannel--time_column']}}',\n\
\ time_series_id_col = '{{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}',\n\
\ time_series_data_col = '{{$.inputs.parameters['pipelinechannel--target_column']}}',\n\
\ horizon = @forecast_horizon,\n auto_arima\
\ = True,\n auto_arima_max_order = @max_order,\n \
\ data_frequency = @data_granularity_unit,\n holiday_region\
\ = 'GLOBAL',\n clean_spikes_and_dips = True,\n \
\ adjust_step_changes = True,\n decompose_time_series\
\ = True\n ) AS\n SELECT\n {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}},\n\
\ {{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ {{$.inputs.parameters['pipelinechannel--target_column']}},\n\
\ FROM `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-dataset_id']}}.fte_time_series_output_{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}`\n\
\ WHERE\n UPPER(split__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}})\
\ IN UNNEST(@splits)\n AND TIMESTAMP({{$.inputs.parameters['pipelinechannel--time_column']}})\
\ < @start_time\n "
query_parameters:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-serialized-query-parameters-2
taskInfo:
name: create-eval-model
query-with-retry-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-query-with-retry-2
dependentTasks:
- build-job-configuration-query-3
- build-serialized-query-parameters-2
- query-with-retry
inputs:
parameters:
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query-3
location:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--bigquery-create-dataset-2-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-dataset_id
pipelinechannel--bigquery-create-dataset-2-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-project_id
pipelinechannel--forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
pipelinechannel--get-fte-suffix-Output:
componentInputParameter: pipelinechannel--get-fte-suffix-Output
pipelinechannel--query-with-retry-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: query-with-retry
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n SELECT\n @start_time AS predicted_on_{{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ AVG(mean_absolute_error) AS MAE,\n AVG(mean_squared_error)\
\ AS MSE,\n AVG(mean_absolute_percentage_error) AS MAPE,\n\
\ @prediction_count AS prediction_count,\n FROM ML.EVALUATE(\n\
\ MODEL `{{$.inputs.parameters['pipelinechannel--query-with-retry-Output']}}`,\n\
\ TABLE `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-dataset_id']}}.fte_time_series_output_{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}`,\n\
\ STRUCT(True AS perform_aggregation, {{$.inputs.parameters['pipelinechannel--forecast_horizon']}}\
\ as horizon))\n "
query_parameters:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-serialized-query-parameters-2
taskInfo:
name: append-evaluation-metrics
query-with-retry-3:
cachingOptions:
enableCache: true
componentRef:
name: comp-query-with-retry-3
dependentTasks:
- build-job-configuration-query-4
- build-serialized-query-parameters-2
- query-with-retry
inputs:
parameters:
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query-4
location:
componentInputParameter: pipelinechannel--get-table-location-Output
pipelinechannel--bigquery-create-dataset-2-dataset_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-dataset_id
pipelinechannel--bigquery-create-dataset-2-project_id:
componentInputParameter: pipelinechannel--bigquery-create-dataset-2-project_id
pipelinechannel--forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
pipelinechannel--get-fte-suffix-Output:
componentInputParameter: pipelinechannel--get-fte-suffix-Output
pipelinechannel--query-with-retry-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: query-with-retry
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n SELECT\n CAST(actual.{{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\
\ AS STRING)\n AS {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}},\n\
\ TIMESTAMP(actual.{{$.inputs.parameters['pipelinechannel--time_column']}})\
\ AS {{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ @start_time AS predicted_on_{{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ CAST(actual.{{$.inputs.parameters['pipelinechannel--target_column']}}\
\ AS FLOAT64) AS {{$.inputs.parameters['pipelinechannel--target_column']}},\n\
\ STRUCT(pred.forecast_value AS value) AS predicted_{{$.inputs.parameters['pipelinechannel--target_column']}},\n\
\ FROM\n ML.FORECAST(\n MODEL `{{$.inputs.parameters['pipelinechannel--query-with-retry-Output']}}`,\n\
\ STRUCT({{$.inputs.parameters['pipelinechannel--forecast_horizon']}}\
\ AS horizon)) pred\n JOIN `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-2-dataset_id']}}.fte_time_series_output_{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}`\
\ actual\n ON\n pred.forecast_timestamp = TIMESTAMP(actual.{{$.inputs.parameters['pipelinechannel--time_column']}})\n\
\ AND pred.{{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\n\
\ = actual.{{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\n\
\ "
query_parameters:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-serialized-query-parameters-2
taskInfo:
name: append-evaluated-examples
inputDefinitions:
parameters:
pipelinechannel--bigquery-create-dataset-2-dataset_id:
parameterType: STRING
pipelinechannel--bigquery-create-dataset-2-project_id:
parameterType: STRING
pipelinechannel--bigquery-create-dataset-dataset_id:
parameterType: STRING
pipelinechannel--bigquery-create-dataset-project_id:
parameterType: STRING
pipelinechannel--bigquery-list-rows-Output:
parameterType: LIST
pipelinechannel--bigquery-list-rows-Output-loop-item:
parameterType: STRUCT
pipelinechannel--data_granularity_unit:
parameterType: STRING
pipelinechannel--forecast_horizon:
parameterType: NUMBER_INTEGER
pipelinechannel--get-fte-suffix-Output:
parameterType: STRING
pipelinechannel--get-table-location-Output:
parameterType: STRING
pipelinechannel--max_order:
parameterType: NUMBER_INTEGER
pipelinechannel--project:
parameterType: STRING
pipelinechannel--run_evaluation:
parameterType: BOOLEAN
pipelinechannel--table-to-uri-2-dataset_id:
parameterType: STRING
pipelinechannel--table-to-uri-2-project_id:
parameterType: STRING
pipelinechannel--table-to-uri-2-table_id:
parameterType: STRING
pipelinechannel--table-to-uri-dataset_id:
parameterType: STRING
pipelinechannel--table-to-uri-project_id:
parameterType: STRING
pipelinechannel--table-to-uri-table_id:
parameterType: STRING
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--time_column:
parameterType: STRING
pipelinechannel--time_series_identifier_column:
parameterType: STRING
comp-get-fte-suffix:
executorLabel: exec-get-fte-suffix
inputDefinitions:
parameters:
bigquery_staging_full_dataset_id:
parameterType: STRING
fte_table:
parameterType: STRING
location:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-get-table-location:
executorLabel: exec-get-table-location
inputDefinitions:
parameters:
default_location:
defaultValue: ''
description: Location to return if no table was given.
isOptional: true
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
table:
description: The BigQuery table to get a location for.
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-get-value:
executorLabel: exec-get-value
inputDefinitions:
parameters:
d:
parameterType: STRUCT
key:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-get-window-query-priority:
executorLabel: exec-get-window-query-priority
inputDefinitions:
parameters:
max_interactive:
defaultValue: 100.0
isOptional: true
parameterType: NUMBER_INTEGER
window:
parameterType: STRUCT
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-maybe-replace-with-default:
executorLabel: exec-maybe-replace-with-default
inputDefinitions:
parameters:
default:
defaultValue: ''
isOptional: true
parameterType: STRING
value:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-query-with-retry:
executorLabel: exec-query-with-retry
inputDefinitions:
parameters:
destination_uri:
defaultValue: ''
          description: Optional BigQuery URI to return as output if the query succeeds.
isOptional: true
parameterType: STRING
job_configuration_query:
description: Additional query job configurations.
isOptional: true
parameterType: STRUCT
location:
description: The GCP region.
parameterType: STRING
max_retry_count:
defaultValue: 5.0
description: Maximum number of times to retry the query.
isOptional: true
parameterType: NUMBER_INTEGER
project:
description: The GCP project.
parameterType: STRING
query:
description: The query to run.
parameterType: STRING
query_parameters:
description: A list of query parameters.
isOptional: true
parameterType: LIST
retry_wait_seconds:
defaultValue: 10.0
          description: 'Approximate number of seconds to wait before the first
            retry attempt; subsequent retries use exponential backoff.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-query-with-retry-2:
executorLabel: exec-query-with-retry-2
inputDefinitions:
parameters:
destination_uri:
defaultValue: ''
          description: Optional BigQuery URI to return as output if the query succeeds.
isOptional: true
parameterType: STRING
job_configuration_query:
description: Additional query job configurations.
isOptional: true
parameterType: STRUCT
location:
description: The GCP region.
parameterType: STRING
max_retry_count:
defaultValue: 5.0
description: Maximum number of times to retry the query.
isOptional: true
parameterType: NUMBER_INTEGER
project:
description: The GCP project.
parameterType: STRING
query:
description: The query to run.
parameterType: STRING
query_parameters:
description: A list of query parameters.
isOptional: true
parameterType: LIST
retry_wait_seconds:
defaultValue: 10.0
          description: 'Approximate number of seconds to wait before the first
            retry attempt; subsequent retries use exponential backoff.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-query-with-retry-3:
executorLabel: exec-query-with-retry-3
inputDefinitions:
parameters:
destination_uri:
defaultValue: ''
          description: Optional BigQuery URI to return as output if the query succeeds.
isOptional: true
parameterType: STRING
job_configuration_query:
description: Additional query job configurations.
isOptional: true
parameterType: STRUCT
location:
description: The GCP region.
parameterType: STRING
max_retry_count:
defaultValue: 5.0
description: Maximum number of times to retry the query.
isOptional: true
parameterType: NUMBER_INTEGER
project:
description: The GCP project.
parameterType: STRING
query:
description: The query to run.
parameterType: STRING
query_parameters:
description: A list of query parameters.
isOptional: true
parameterType: LIST
retry_wait_seconds:
defaultValue: 10.0
          description: 'Approximate number of seconds to wait before the first
            retry attempt; subsequent retries use exponential backoff.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-table-to-uri:
executorLabel: exec-table-to-uri
inputDefinitions:
artifacts:
table:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
use_bq_prefix:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
table_id:
parameterType: STRING
uri:
parameterType: STRING
comp-table-to-uri-2:
executorLabel: exec-table-to-uri-2
inputDefinitions:
artifacts:
table:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
use_bq_prefix:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
table_id:
parameterType: STRING
uri:
parameterType: STRING
comp-validate-inputs:
executorLabel: exec-validate-inputs
inputDefinitions:
parameters:
bigquery_destination_uri:
isOptional: true
parameterType: STRING
data_granularity_unit:
isOptional: true
parameterType: STRING
data_source_bigquery_table_path:
isOptional: true
parameterType: STRING
data_source_csv_filenames:
isOptional: true
parameterType: STRING
optimization_objective:
isOptional: true
parameterType: STRING
predefined_split_key:
isOptional: true
parameterType: STRING
source_model_uri:
isOptional: true
parameterType: STRING
target_column:
isOptional: true
parameterType: STRING
test_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
time_column:
isOptional: true
parameterType: STRING
time_series_identifier_column:
isOptional: true
parameterType: STRING
timestamp_split_key:
isOptional: true
parameterType: STRING
training_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
validation_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
window_column:
isOptional: true
parameterType: STRING
window_max_count:
isOptional: true
parameterType: NUMBER_INTEGER
window_stride_length:
isOptional: true
parameterType: NUMBER_INTEGER
comp-wrapped-in-list:
executorLabel: exec-wrapped-in-list
inputDefinitions:
parameters:
value:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: LIST
deploymentSpec:
executors:
exec-bigquery-create-dataset:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_create_dataset
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_create_dataset(\n project: str,\n location: str,\n\
\ dataset: str,\n exists_ok: bool = False,\n) -> NamedTuple('Outputs',\
\ [('project_id', str), ('dataset_id', str)]):\n \"\"\"Creates a BigQuery\
\ dataset.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import collections\n\n from google.cloud import bigquery\n # pylint:\
\ enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n ref\
\ = client.create_dataset(dataset=dataset, exists_ok=exists_ok)\n return\
\ collections.namedtuple('Outputs', ['project_id', 'dataset_id'])(\n \
\ ref.project, ref.dataset_id)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-create-dataset-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_create_dataset
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_create_dataset(\n project: str,\n location: str,\n\
\ dataset: str,\n exists_ok: bool = False,\n) -> NamedTuple('Outputs',\
\ [('project_id', str), ('dataset_id', str)]):\n \"\"\"Creates a BigQuery\
\ dataset.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import collections\n\n from google.cloud import bigquery\n # pylint:\
\ enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n ref\
\ = client.create_dataset(dataset=dataset, exists_ok=exists_ok)\n return\
\ collections.namedtuple('Outputs', ['project_id', 'dataset_id'])(\n \
\ ref.project, ref.dataset_id)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-create-model-job:
container:
args:
- --type
- BigqueryCreateModelJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
"}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.create_model.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-bigquery-delete-dataset-with-prefix:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_delete_dataset_with_prefix
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_delete_dataset_with_prefix(\n project: str,\n \
\ dataset_prefix: str,\n delete_contents: bool = False,\n) -> None:\n\
\ \"\"\"Deletes all BigQuery datasets matching the given prefix.\"\"\"\n\
\ # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project)\n for dataset in client.list_datasets(project=project):\n\
\ if dataset.dataset_id.startswith(dataset_prefix):\n client.delete_dataset(\n\
\ dataset=dataset.dataset_id,\n delete_contents=delete_contents)\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-list-rows:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_list_rows
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_list_rows(\n project: str,\n location: str,\n\
\ table: dsl.Input[dsl.Artifact],\n) -> List[Dict[str, str]]:\n \"\"\
\"Lists the rows of the given BigQuery table.\n\n Args:\n project: The\
\ GCP project.\n location: The GCP region.\n table: A google.BQTable\
\ artifact.\n\n Returns:\n A list of dicts representing BigQuery rows.\
\ Rows are keyed by column, and\n all values are stored as strings.\n\
\ \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n metadata\
\ = table.metadata\n rows = client.list_rows('.'.join(\n [metadata['projectId'],\
\ metadata['datasetId'], metadata['tableId']]))\n result = []\n for row\
\ in rows:\n result.append({col: str(value) for col, value in dict(row).items()})\n\
\ return result\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-list-rows-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_list_rows
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_list_rows(\n project: str,\n location: str,\n\
\ table: dsl.Input[dsl.Artifact],\n) -> List[Dict[str, str]]:\n \"\"\
\"Lists the rows of the given BigQuery table.\n\n Args:\n project: The\
\ GCP project.\n location: The GCP region.\n table: A google.BQTable\
\ artifact.\n\n Returns:\n A list of dicts representing BigQuery rows.\
\ Rows are keyed by column, and\n all values are stored as strings.\n\
\ \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n metadata\
\ = table.metadata\n rows = client.list_rows('.'.join(\n [metadata['projectId'],\
\ metadata['datasetId'], metadata['tableId']]))\n result = []\n for row\
\ in rows:\n result.append({col: str(value) for col, value in dict(row).items()})\n\
\ return result\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-query-job:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-bigquery-query-job-2:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-bigquery-query-job-3:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-bigquery-query-job-4:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-bigquery-query-job-5:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-build-job-configuration-query:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-job-configuration-query-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-job-configuration-query-3:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-job-configuration-query-4:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-job-configuration-query-5:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-job-configuration-query-6:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-serialized-query-parameters:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_serialized_query_parameters
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_serialized_query_parameters(\n forecast_horizon: Optional[int]\
\ = None,\n forecast_horizon_off_by_one: bool = False,\n data_granularity_unit:\
\ Optional[str] = None,\n splits: Optional[List[str]] = None,\n window:\
\ Optional[Dict[str, str]] = None,\n max_order: Optional[int] = None,\n\
) -> list: # pylint: disable=g-bare-generic\n \"\"\"Creates configuration\
\ JSON objects for BQML queries.\n\n All query parameters will be stored\
\ in a list of QueryParameter objects:\n https://cloud.google.com/bigquery/docs/reference/rest/v2/QueryParameter\n\
\n Args:\n forecast_horizon: The number of time periods into the future\
\ for which\n forecasts will be created. Future periods start after\
\ the latest timestamp\n for each time series.\n forecast_horizon_off_by_one:\
\ If True, subtract 1 from the forecast horizon\n in the query parameters.\n\
\ data_granularity_unit: The data granularity unit. Accepted values are:\n\
\ minute, hour, day, week, month, year.\n splits: Dataset splits\
\ to be used to train the model.\n window: Dict containing information\
\ about the forecast window the model\n should have. If no window is\
\ provided, the window will start after the\n latest period in the\
\ available data.\n max_order: Integer between 1 and 5 representing the\
\ size of the parameter\n search space for ARIMA_PLUS. 5 would result\
\ in the highest accuracy model,\n but also the longest training runtime.\n\
\n Returns:\n A list of QueryParameters.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import datetime\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n # Maps Vertex Forecasting time units to BQML time units.\n unit_map\
\ = {\n 'minute': 'per_minute',\n 'hour': 'hourly',\n 'day':\
\ 'daily',\n 'week': 'weekly',\n 'month': 'monthly',\n 'year':\
\ 'yearly',\n }\n query_parameters = []\n if data_granularity_unit is\
\ not None:\n if data_granularity_unit.lower() not in unit_map:\n \
\ raise ValueError(\n f'{data_granularity_unit} is not a valid\
\ time unit. '\n f'Must be one of: {\", \".join(unit_map.keys())}')\n\
\ query_parameters.append({\n 'name': 'data_granularity_unit',\n\
\ 'parameterType': {\n 'type': 'STRING'\n },\n\
\ 'parameterValue': {\n 'value': unit_map[data_granularity_unit.lower()],\n\
\ },\n })\n if max_order is not None:\n query_parameters.append({\n\
\ 'name': 'max_order',\n 'parameterType': {\n 'type':\
\ 'INTEGER'\n },\n 'parameterValue': {\n 'value':\
\ str(max_order)\n },\n })\n if forecast_horizon is not None:\n\
\ if forecast_horizon_off_by_one:\n forecast_horizon -= 1\n query_parameters.append({\n\
\ 'name': 'forecast_horizon',\n 'parameterType': {\n \
\ 'type': 'INTEGER'\n },\n 'parameterValue': {\n \
\ 'value': str(forecast_horizon)\n },\n })\n if splits\
\ is not None:\n query_parameters.append({\n 'name': 'splits',\n\
\ 'parameterType': {\n 'type': 'ARRAY',\n 'arrayType':\
\ {\n 'type': 'STRING'\n },\n },\n \
\ 'parameterValue': {\n 'arrayValues': [{\n \
\ 'value': split\n } for split in splits],\n },\n \
\ })\n\n if window is not None:\n query_parameters.append({\n \
\ 'name': 'prediction_count',\n 'parameterType': {\n \
\ 'type': 'INTEGER'\n },\n 'parameterValue': {\n \
\ 'value': window['count']\n },\n })\n\n start_time = window['start_time']\
\ if window else str(datetime.datetime.max)\n query_parameters.append({\n\
\ 'name': 'start_time',\n 'parameterType': {\n 'type':\
\ 'TIMESTAMP'\n },\n 'parameterValue': {\n 'value': start_time\n\
\ },\n })\n return query_parameters\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-serialized-query-parameters-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_serialized_query_parameters
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_serialized_query_parameters(\n forecast_horizon: Optional[int]\
\ = None,\n forecast_horizon_off_by_one: bool = False,\n data_granularity_unit:\
\ Optional[str] = None,\n splits: Optional[List[str]] = None,\n window:\
\ Optional[Dict[str, str]] = None,\n max_order: Optional[int] = None,\n\
) -> list: # pylint: disable=g-bare-generic\n \"\"\"Creates configuration\
\ JSON objects for BQML queries.\n\n All query parameters will be stored\
\ in a list of QueryParameter objects:\n https://cloud.google.com/bigquery/docs/reference/rest/v2/QueryParameter\n\
\n Args:\n forecast_horizon: The number of time periods into the future\
\ for which\n forecasts will be created. Future periods start after\
\ the latest timestamp\n for each time series.\n forecast_horizon_off_by_one:\
\ If True, subtract 1 from the forecast horizon\n in the query parameters.\n\
\ data_granularity_unit: The data granularity unit. Accepted values are:\n\
\ minute, hour, day, week, month, year.\n splits: Dataset splits\
\ to be used to train the model.\n window: Dict containing information\
\ about the forecast window the model\n should have. If no window is\
\ provided, the window will start after the\n latest period in the\
\ available data.\n max_order: Integer between 1 and 5 representing the\
\ size of the parameter\n search space for ARIMA_PLUS. 5 would result\
\ in the highest accuracy model,\n but also the longest training runtime.\n\
\n Returns:\n A list of QueryParameters.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import datetime\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n # Maps Vertex Forecasting time units to BQML time units.\n unit_map\
\ = {\n 'minute': 'per_minute',\n 'hour': 'hourly',\n 'day':\
\ 'daily',\n 'week': 'weekly',\n 'month': 'monthly',\n 'year':\
\ 'yearly',\n }\n query_parameters = []\n if data_granularity_unit is\
\ not None:\n if data_granularity_unit.lower() not in unit_map:\n \
\ raise ValueError(\n f'{data_granularity_unit} is not a valid\
\ time unit. '\n f'Must be one of: {\", \".join(unit_map.keys())}')\n\
\ query_parameters.append({\n 'name': 'data_granularity_unit',\n\
\ 'parameterType': {\n 'type': 'STRING'\n },\n\
\ 'parameterValue': {\n 'value': unit_map[data_granularity_unit.lower()],\n\
\ },\n })\n if max_order is not None:\n query_parameters.append({\n\
\ 'name': 'max_order',\n 'parameterType': {\n 'type':\
\ 'INTEGER'\n },\n 'parameterValue': {\n 'value':\
\ str(max_order)\n },\n })\n if forecast_horizon is not None:\n\
\ if forecast_horizon_off_by_one:\n forecast_horizon -= 1\n query_parameters.append({\n\
\ 'name': 'forecast_horizon',\n 'parameterType': {\n \
\ 'type': 'INTEGER'\n },\n 'parameterValue': {\n \
\ 'value': str(forecast_horizon)\n },\n })\n if splits\
\ is not None:\n query_parameters.append({\n 'name': 'splits',\n\
\ 'parameterType': {\n 'type': 'ARRAY',\n 'arrayType':\
\ {\n 'type': 'STRING'\n },\n },\n \
\ 'parameterValue': {\n 'arrayValues': [{\n \
\ 'value': split\n } for split in splits],\n },\n \
\ })\n\n if window is not None:\n query_parameters.append({\n \
\ 'name': 'prediction_count',\n 'parameterType': {\n \
\ 'type': 'INTEGER'\n },\n 'parameterValue': {\n \
\ 'value': window['count']\n },\n })\n\n start_time = window['start_time']\
\ if window else str(datetime.datetime.max)\n query_parameters.append({\n\
\ 'name': 'start_time',\n 'parameterType': {\n 'type':\
\ 'TIMESTAMP'\n },\n 'parameterValue': {\n 'value': start_time\n\
\ },\n })\n return query_parameters\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-serialized-query-parameters-3:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_serialized_query_parameters
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_serialized_query_parameters(\n forecast_horizon: Optional[int]\
\ = None,\n forecast_horizon_off_by_one: bool = False,\n data_granularity_unit:\
\ Optional[str] = None,\n splits: Optional[List[str]] = None,\n window:\
\ Optional[Dict[str, str]] = None,\n max_order: Optional[int] = None,\n\
) -> list: # pylint: disable=g-bare-generic\n \"\"\"Creates configuration\
\ JSON objects for BQML queries.\n\n All query parameters will be stored\
\ in a list of QueryParameter objects:\n https://cloud.google.com/bigquery/docs/reference/rest/v2/QueryParameter\n\
\n Args:\n forecast_horizon: The number of time periods into the future\
\ for which\n forecasts will be created. Future periods start after\
\ the latest timestamp\n for each time series.\n forecast_horizon_off_by_one:\
\ If True, subtract 1 from the forecast horizon\n in the query parameters.\n\
\ data_granularity_unit: The data granularity unit. Accepted values are:\n\
\ minute, hour, day, week, month, year.\n splits: Dataset splits\
\ to be used to train the model.\n window: Dict containing information\
\ about the forecast window the model\n should have. If no window is\
\ provided, the window will start after the\n latest period in the\
\ available data.\n max_order: Integer between 1 and 5 representing the\
\ size of the parameter\n search space for ARIMA_PLUS. 5 would result\
\ in the highest accuracy model,\n but also the longest training runtime.\n\
\n Returns:\n A list of QueryParameters.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import datetime\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n # Maps Vertex Forecasting time units to BQML time units.\n unit_map\
\ = {\n 'minute': 'per_minute',\n 'hour': 'hourly',\n 'day':\
\ 'daily',\n 'week': 'weekly',\n 'month': 'monthly',\n 'year':\
\ 'yearly',\n }\n query_parameters = []\n if data_granularity_unit is\
\ not None:\n if data_granularity_unit.lower() not in unit_map:\n \
\ raise ValueError(\n f'{data_granularity_unit} is not a valid\
\ time unit. '\n f'Must be one of: {\", \".join(unit_map.keys())}')\n\
\ query_parameters.append({\n 'name': 'data_granularity_unit',\n\
\ 'parameterType': {\n 'type': 'STRING'\n },\n\
\ 'parameterValue': {\n 'value': unit_map[data_granularity_unit.lower()],\n\
\ },\n })\n if max_order is not None:\n query_parameters.append({\n\
\ 'name': 'max_order',\n 'parameterType': {\n 'type':\
\ 'INTEGER'\n },\n 'parameterValue': {\n 'value':\
\ str(max_order)\n },\n })\n if forecast_horizon is not None:\n\
\ if forecast_horizon_off_by_one:\n forecast_horizon -= 1\n query_parameters.append({\n\
\ 'name': 'forecast_horizon',\n 'parameterType': {\n \
\ 'type': 'INTEGER'\n },\n 'parameterValue': {\n \
\ 'value': str(forecast_horizon)\n },\n })\n if splits\
\ is not None:\n query_parameters.append({\n 'name': 'splits',\n\
\ 'parameterType': {\n 'type': 'ARRAY',\n 'arrayType':\
\ {\n 'type': 'STRING'\n },\n },\n \
\ 'parameterValue': {\n 'arrayValues': [{\n \
\ 'value': split\n } for split in splits],\n },\n \
\ })\n\n if window is not None:\n query_parameters.append({\n \
\ 'name': 'prediction_count',\n 'parameterType': {\n \
\ 'type': 'INTEGER'\n },\n 'parameterValue': {\n \
\ 'value': window['count']\n },\n })\n\n start_time = window['start_time']\
\ if window else str(datetime.datetime.max)\n query_parameters.append({\n\
\ 'name': 'start_time',\n 'parameterType': {\n 'type':\
\ 'TIMESTAMP'\n },\n 'parameterValue': {\n 'value': start_time\n\
\ },\n })\n return query_parameters\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-cond:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- cond
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef cond(predicate: bool, true_str: str, false_str: str) -> str:\n\
\ \"\"\"Returns true_str if predicate is true, else false_str.\"\"\"\n\
\ return true_str if predicate else false_str\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-create-metrics-artifact:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- create_metrics_artifact
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef create_metrics_artifact(\n metrics_rows: List[Dict[str, str]],\n\
\ evaluation_metrics: dsl.Output[dsl.Metrics],\n) -> None:\n \"\"\"\
Converts the rows of a metrics table into an Artifact.\"\"\"\n metric_name_map\
\ = {\n 'MAE': 'meanAbsoluteError',\n 'RMSE': 'rootMeanSquaredError',\n\
\ 'MAPE': 'meanAbsolutePercentageError',\n }\n metrics = {metric_name_map[k]:\
\ v for k, v in dict(metrics_rows[0]).items()}\n evaluation_metrics.metadata\
\ = metrics\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-feature-transform-engine:
container:
args:
- feature_transform_engine
- '{"Concat": ["--project=", "{{$.inputs.parameters[''project'']}}"]}'
- '{"Concat": ["--location=", "{{$.inputs.parameters[''location'']}}"]}'
- '{"Concat": ["--dataset_level_custom_transformation_definitions=", "{{$.inputs.parameters[''dataset_level_custom_transformation_definitions'']}}"]}'
- '{"Concat": ["--dataset_level_transformations=", "{{$.inputs.parameters[''dataset_level_transformations'']}}"]}'
- '{"Concat": ["--forecasting_time_column=", "{{$.inputs.parameters[''forecasting_time_column'']}}"]}'
- '{"IfPresent": {"InputName": "forecasting_time_series_identifier_column",
"Then": {"Concat": ["--forecasting_time_series_identifier_column=", "{{$.inputs.parameters[''forecasting_time_series_identifier_column'']}}"]}}}'
- '{"Concat": ["--forecasting_time_series_identifier_columns=", "{{$.inputs.parameters[''forecasting_time_series_identifier_columns'']}}"]}'
- '{"Concat": ["--forecasting_time_series_attribute_columns=", "{{$.inputs.parameters[''forecasting_time_series_attribute_columns'']}}"]}'
- '{"Concat": ["--forecasting_unavailable_at_forecast_columns=", "{{$.inputs.parameters[''forecasting_unavailable_at_forecast_columns'']}}"]}'
- '{"Concat": ["--forecasting_available_at_forecast_columns=", "{{$.inputs.parameters[''forecasting_available_at_forecast_columns'']}}"]}'
- '{"Concat": ["--forecasting_forecast_horizon=", "{{$.inputs.parameters[''forecasting_forecast_horizon'']}}"]}'
- '{"Concat": ["--forecasting_context_window=", "{{$.inputs.parameters[''forecasting_context_window'']}}"]}'
- '{"Concat": ["--forecasting_predefined_window_column=", "{{$.inputs.parameters[''forecasting_predefined_window_column'']}}"]}'
- '{"Concat": ["--forecasting_window_stride_length=", "{{$.inputs.parameters[''forecasting_window_stride_length'']}}"]}'
- '{"Concat": ["--forecasting_window_max_count=", "{{$.inputs.parameters[''forecasting_window_max_count'']}}"]}'
- '{"Concat": ["--forecasting_holiday_regions=", "{{$.inputs.parameters[''forecasting_holiday_regions'']}}"]}'
- '{"Concat": ["--forecasting_apply_windowing=", "{{$.inputs.parameters[''forecasting_apply_windowing'']}}"]}'
- '{"Concat": ["--predefined_split_key=", "{{$.inputs.parameters[''predefined_split_key'']}}"]}'
- '{"Concat": ["--stratified_split_key=", "{{$.inputs.parameters[''stratified_split_key'']}}"]}'
- '{"Concat": ["--timestamp_split_key=", "{{$.inputs.parameters[''timestamp_split_key'']}}"]}'
- '{"Concat": ["--training_fraction=", "{{$.inputs.parameters[''training_fraction'']}}"]}'
- '{"Concat": ["--validation_fraction=", "{{$.inputs.parameters[''validation_fraction'']}}"]}'
- '{"Concat": ["--test_fraction=", "{{$.inputs.parameters[''test_fraction'']}}"]}'
- '{"Concat": ["--stats_gen_execution_engine=", "{{$.inputs.parameters[''stats_gen_execution_engine'']}}"]}'
- '{"Concat": ["--tf_transform_execution_engine=", "{{$.inputs.parameters[''tf_transform_execution_engine'']}}"]}'
- '{"IfPresent": {"InputName": "tf_auto_transform_features", "Then": {"Concat":
["--tf_auto_transform_features=", "{{$.inputs.parameters[''tf_auto_transform_features'']}}"]}}}'
- '{"Concat": ["--tf_custom_transformation_definitions=", "{{$.inputs.parameters[''tf_custom_transformation_definitions'']}}"]}'
- '{"Concat": ["--tf_transformations_path=", "{{$.inputs.parameters[''tf_transformations_path'']}}"]}'
- '{"Concat": ["--legacy_transformations_path=", "{{$.inputs.parameters[''legacy_transformations_path'']}}"]}'
- '{"Concat": ["--data_source_csv_filenames=", "{{$.inputs.parameters[''data_source_csv_filenames'']}}"]}'
- '{"Concat": ["--data_source_bigquery_table_path=", "{{$.inputs.parameters[''data_source_bigquery_table_path'']}}"]}'
- '{"Concat": ["--bigquery_staging_full_dataset_id=", "{{$.inputs.parameters[''bigquery_staging_full_dataset_id'']}}"]}'
- '{"Concat": ["--target_column=", "{{$.inputs.parameters[''target_column'']}}"]}'
- '{"Concat": ["--weight_column=", "{{$.inputs.parameters[''weight_column'']}}"]}'
- '{"Concat": ["--prediction_type=", "{{$.inputs.parameters[''prediction_type'']}}"]}'
- '{"IfPresent": {"InputName": "model_type", "Then": {"Concat": ["--model_type=",
"{{$.inputs.parameters[''model_type'']}}"]}}}'
- '{"Concat": ["--multimodal_tabular_columns=", "{{$.inputs.parameters[''multimodal_tabular_columns'']}}"]}'
- '{"Concat": ["--multimodal_timeseries_columns=", "{{$.inputs.parameters[''multimodal_timeseries_columns'']}}"]}'
- '{"Concat": ["--multimodal_text_columns=", "{{$.inputs.parameters[''multimodal_text_columns'']}}"]}'
- '{"Concat": ["--multimodal_image_columns=", "{{$.inputs.parameters[''multimodal_image_columns'']}}"]}'
- '{"Concat": ["--run_distill=", "{{$.inputs.parameters[''run_distill'']}}"]}'
- '{"Concat": ["--run_feature_selection=", "{{$.inputs.parameters[''run_feature_selection'']}}"]}'
- '{"Concat": ["--materialized_examples_format=", "{{$.inputs.parameters[''materialized_examples_format'']}}"]}'
- '{"Concat": ["--max_selected_features=", "{{$.inputs.parameters[''max_selected_features'']}}"]}'
- '{"Concat": ["--feature_selection_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/feature_selection_staging_dir"]}'
- '{"Concat": ["--feature_selection_algorithm=", "{{$.inputs.parameters[''feature_selection_algorithm'']}}"]}'
- '{"Concat": ["--feature_selection_execution_engine=", "{{$.inputs.parameters[''feature_selection_execution_engine'']}}"]}'
- '{"Concat": ["--feature_ranking_path=", "{{$.outputs.artifacts[''feature_ranking''].uri}}"]}'
- '{"Concat": ["--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.txt"]}'
- '{"Concat": ["--stats_result_path=", "{{$.outputs.artifacts[''dataset_stats''].uri}}"]}'
- '{"Concat": ["--transform_output_artifact_path=", "{{$.outputs.artifacts[''transform_output''].uri}}"]}'
- '{"Concat": ["--transform_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform"]}'
- '{"Concat": ["--materialized_examples_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/materialized"]}'
- '{"Concat": ["--export_data_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/export"]}'
- '{"Concat": ["--materialized_data_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/materialized_data"]}'
- '{"Concat": ["--materialized_data_artifact_path=", "{{$.outputs.artifacts[''materialized_data''].uri}}"]}'
- '{"Concat": ["--bigquery_train_split_uri_path=", "{{$.outputs.parameters[''bigquery_train_split_uri''].output_file}}"]}'
- '{"Concat": ["--bigquery_validation_split_uri_path=", "{{$.outputs.parameters[''bigquery_validation_split_uri''].output_file}}"]}'
- '{"Concat": ["--bigquery_test_split_uri_path=", "{{$.outputs.parameters[''bigquery_test_split_uri''].output_file}}"]}'
- '{"Concat": ["--bigquery_downsampled_test_split_uri_path=", "{{$.outputs.parameters[''bigquery_downsampled_test_split_uri''].output_file}}"]}'
- '{"Concat": ["--split_example_counts_path=", "{{$.outputs.parameters[''split_example_counts''].output_file}}"]}'
- '{"Concat": ["--instance_schema_path=", "{{$.outputs.artifacts[''instance_schema''].path}}"]}'
- '{"Concat": ["--training_schema_path=", "{{$.outputs.artifacts[''training_schema''].path}}"]}'
- --job_name=feature-transform-engine-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- '{"Concat": ["--dataflow_project=", "{{$.inputs.parameters[''project'']}}"]}'
- '{"Concat": ["--dataflow_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging"]}'
- '{"Concat": ["--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp"]}'
- '{"Concat": ["--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}"]}'
- '{"Concat": ["--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}"]}'
- --dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625
- --feature_transform_engine_docker_uri=us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240808_0625
- '{"Concat": ["--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}"]}'
- '{"Concat": ["--dataflow_subnetwork_fully_qualified=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}"]}'
- '{"Concat": ["--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}"]}'
- '{"Concat": ["--dataflow_service_account=", "{{$.inputs.parameters[''dataflow_service_account'']}}"]}'
- '{"Concat": ["--dataflow_kms_key=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}"]}'
- '{"Concat": ["--autodetect_csv_schema=", "{{$.inputs.parameters[''autodetect_csv_schema'']}}"]}'
- '{"Concat": ["--gcp_resources_path=", "{{$.outputs.parameters[''gcp_resources''].output_file}}"]}'
- '{"IfPresent": {"InputName": "group_columns", "Then": {"Concat": ["--group_columns=",
"{{$.inputs.parameters[''group_columns'']}}"]}}}'
- '{"IfPresent": {"InputName": "group_total_weight", "Then": {"Concat": ["--group_total_weight=",
"{{$.inputs.parameters[''group_total_weight'']}}"]}}}'
- '{"IfPresent": {"InputName": "temporal_total_weight", "Then": {"Concat":
["--temporal_total_weight=", "{{$.inputs.parameters[''temporal_total_weight'']}}"]}}}'
- '{"IfPresent": {"InputName": "group_temporal_total_weight", "Then": {"Concat":
["--group_temporal_total_weight=", "{{$.inputs.parameters[''group_temporal_total_weight'']}}"]}}}'
- '{"Concat": ["--encryption_spec_key_name=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}"]}'
image: us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240808_0625
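    # Note on the args above: each quoted JSON string is a KFP v2 IR placeholder.
    # "Concat" joins a literal flag prefix with the resolved value of an input
    # parameter or output path, and "IfPresent" emits its "Then" clause only when
    # the named optional input is bound at runtime, so unset optional flags are
    # omitted from the container command line.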
exec-get-fte-suffix:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_fte_suffix
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_fte_suffix(\n project: str,\n location: str,\n bigquery_staging_full_dataset_id:\
\ str,\n fte_table: str,\n) -> str:\n \"\"\"Infers the FTE suffix from\
\ the intermediate FTE table name.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n for\
\ table in client.list_tables(bigquery_staging_full_dataset_id):\n if\
\ table.table_id.startswith(fte_table):\n return table.table_id[len(fte_table)\
\ + 1:]\n raise ValueError(\n f'No FTE output tables found in {bigquery_staging_full_dataset_id}.')\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-table-location:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_table_location
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_table_location(\n project: str,\n table: Optional[str],\n\
\ default_location: str = '',\n) -> str:\n \"\"\"Returns the region\
\ the given table belongs to.\n\n Args:\n project: The GCP project.\n\
\ table: The BigQuery table to get a location for.\n default_location:\
\ Location to return if no table was given.\n\n Returns:\n A GCP region\
\ or multi-region.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n if not table:\n return default_location\n\n client = bigquery.Client(project=project)\n\
\ if table.startswith('bq://'):\n table = table[len('bq://'):]\n elif\
\ table.startswith('bigquery://'):\n table = table[len('bigquery://'):]\n\
\ return client.get_table(table).location\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-value:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_value
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_value(d: Dict[str, str], key: str) -> str:\n return d[key]\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-window-query-priority:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_window_query_priority
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_window_query_priority(\n window: Dict[str, str],\n \
\ max_interactive: int = 100,\n) -> str:\n \"\"\"Returns a query priority\
\ depending on the window number.\"\"\"\n if int(window['window_number'])\
\ <= max_interactive:\n return 'INTERACTIVE'\n else:\n return 'BATCH'\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-maybe-replace-with-default:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- maybe_replace_with_default
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef maybe_replace_with_default(value: str, default: str = '') ->\
\ str:\n \"\"\"Replaces string with another value if it is a dash.\"\"\"\
\n return default if not value else value\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-query-with-retry:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- query_with_retry
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef query_with_retry(\n project: str,\n location: str,\n \
\ query: str,\n query_parameters: Optional[list] = None, # pylint:\
\ disable=g-bare-generic\n job_configuration_query: Optional[dict] =\
\ None, # pylint: disable=g-bare-generic\n max_retry_count: int = 5,\n\
\ retry_wait_seconds: int = 10, # Waits up to 4 minutes before 5th retry.\n\
\ destination_uri: str = '',\n) -> str:\n \"\"\"Runs a query and retries\
\ on failure.\n\n Args:\n project: The GCP project.\n location: The\
\ GCP region.\n query: The query to run.\n query_parameters: A list\
\ of query parameters.\n job_configuration_query: Additional query job\
\ configurations.\n max_retry_count: Maximum number of times to retry\
\ the query.\n retry_wait_seconds: Approximate initial number of seconds\
\ to wait before\n making another query attempt with exponential backoff.\n\
\ destination_uri: Optional BigQuery URI to output if the query succeeds.\n\
\n Returns:\n The given destination URI.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import logging\n import random\n import time\n\n from google.api_core\
\ import exceptions\n from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n query_parameters = query_parameters or []\n job_configuration_query\
\ = job_configuration_query or {}\n client = bigquery.Client(project=project,\
\ location=location)\n\n job_configuration_query['queryParameters'] = query_parameters\n\
\ job_config = bigquery.QueryJobConfig.from_api_repr(\n {'query':\
\ job_configuration_query})\n retry_count = 0\n while True:\n try:\n\
\ client.query(query, job_config=job_config).result()\n break\n\
\ except (exceptions.BadRequest, exceptions.Forbidden) as e:\n if\
\ retry_count >= max_retry_count:\n logging.info('Maximum retries\
\ reached.')\n raise\n wait_time = (\n retry_wait_seconds\
\ * (2 ** retry_count) * random.uniform(1, 1.5))\n logging.info(\n\
\ 'Query failed with %s. Retrying after %d seconds.', e, wait_time)\n\
\ time.sleep(wait_time)\n retry_count += 1\n return destination_uri\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
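    # Backoff note for the query_with_retry component above (and its -2/-3 copies):
    # with the defaults retry_wait_seconds=10 and max_retry_count=5, the wait before
    # the k-th retry is roughly 10 * 2**(k-1) * uniform(1, 1.5) seconds, so the 5th
    # and final retry is preceded by a pause of about 160-240 seconds (up to ~4
    # minutes); once max_retry_count is exceeded the BigQuery error is re-raised.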
exec-query-with-retry-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- query_with_retry
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef query_with_retry(\n project: str,\n location: str,\n \
\ query: str,\n query_parameters: Optional[list] = None, # pylint:\
\ disable=g-bare-generic\n job_configuration_query: Optional[dict] =\
\ None, # pylint: disable=g-bare-generic\n max_retry_count: int = 5,\n\
\ retry_wait_seconds: int = 10, # Waits up to 4 minutes before 5th retry.\n\
\ destination_uri: str = '',\n) -> str:\n \"\"\"Runs a query and retries\
\ on failure.\n\n Args:\n project: The GCP project.\n location: The\
\ GCP region.\n query: The query to run.\n query_parameters: A list\
\ of query parameters.\n job_configuration_query: Additional query job\
\ configurations.\n max_retry_count: Maximum number of times to retry\
\ the query.\n retry_wait_seconds: Approximate initial number of seconds\
\ to wait before\n making another query attempt with exponential backoff.\n\
\ destination_uri: Optional BigQuery URI to output if the query succeeds.\n\
\n Returns:\n The given destination URI.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import logging\n import random\n import time\n\n from google.api_core\
\ import exceptions\n from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n query_parameters = query_parameters or []\n job_configuration_query\
\ = job_configuration_query or {}\n client = bigquery.Client(project=project,\
\ location=location)\n\n job_configuration_query['queryParameters'] = query_parameters\n\
\ job_config = bigquery.QueryJobConfig.from_api_repr(\n {'query':\
\ job_configuration_query})\n retry_count = 0\n while True:\n try:\n\
\ client.query(query, job_config=job_config).result()\n break\n\
\ except (exceptions.BadRequest, exceptions.Forbidden) as e:\n if\
\ retry_count >= max_retry_count:\n logging.info('Maximum retries\
\ reached.')\n raise\n wait_time = (\n retry_wait_seconds\
\ * (2 ** retry_count) * random.uniform(1, 1.5))\n logging.info(\n\
\ 'Query failed with %s. Retrying after %d seconds.', e, wait_time)\n\
\ time.sleep(wait_time)\n retry_count += 1\n return destination_uri\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-query-with-retry-3:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- query_with_retry
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef query_with_retry(\n project: str,\n location: str,\n \
\ query: str,\n query_parameters: Optional[list] = None, # pylint:\
\ disable=g-bare-generic\n job_configuration_query: Optional[dict] =\
\ None, # pylint: disable=g-bare-generic\n max_retry_count: int = 5,\n\
\ retry_wait_seconds: int = 10, # Waits up to 4 minutes before 5th retry.\n\
\ destination_uri: str = '',\n) -> str:\n \"\"\"Runs a query and retries\
\ on failure.\n\n Args:\n project: The GCP project.\n location: The\
\ GCP region.\n query: The query to run.\n query_parameters: A list\
\ of query parameters.\n job_configuration_query: Additional query job\
\ configurations.\n max_retry_count: Maximum number of times to retry\
\ the query.\n retry_wait_seconds: Approximate initial number of seconds\
\ to wait before\n making another query attempt with exponential backoff.\n\
\ destination_uri: Optional BigQuery URI to output if the query succeeds.\n\
\n Returns:\n The given destination URI.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import logging\n import random\n import time\n\n from google.api_core\
\ import exceptions\n from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n query_parameters = query_parameters or []\n job_configuration_query\
\ = job_configuration_query or {}\n client = bigquery.Client(project=project,\
\ location=location)\n\n job_configuration_query['queryParameters'] = query_parameters\n\
\ job_config = bigquery.QueryJobConfig.from_api_repr(\n {'query':\
\ job_configuration_query})\n retry_count = 0\n while True:\n try:\n\
\ client.query(query, job_config=job_config).result()\n break\n\
\ except (exceptions.BadRequest, exceptions.Forbidden) as e:\n if\
\ retry_count >= max_retry_count:\n logging.info('Maximum retries\
\ reached.')\n raise\n wait_time = (\n retry_wait_seconds\
\ * (2 ** retry_count) * random.uniform(1, 1.5))\n logging.info(\n\
\ 'Query failed with %s. Retrying after %d seconds.', e, wait_time)\n\
\ time.sleep(wait_time)\n retry_count += 1\n return destination_uri\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-table-to-uri:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- table_to_uri
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef table_to_uri(\n table: dsl.Input[dsl.Artifact],\n use_bq_prefix:\
\ bool = False,\n) -> NamedTuple(\n 'Outputs',\n [\n ('project_id',\
\ str),\n ('dataset_id', str),\n ('table_id', str),\n \
\ ('uri', str),\n ],\n):\n \"\"\"Converts a google.BQTable to a URI.\"\
\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import collections\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n outputs = [\n table.metadata['projectId'],\n table.metadata['datasetId'],\n\
\ table.metadata['tableId'],\n ]\n bq_uri = '.'.join(outputs)\n \
\ if use_bq_prefix:\n bq_uri = 'bq://' + bq_uri\n outputs.append(bq_uri)\n\
\ return collections.namedtuple(\n 'Outputs',\n ['project_id',\
\ 'dataset_id', 'table_id', 'uri'],\n )(*outputs)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-table-to-uri-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- table_to_uri
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef table_to_uri(\n table: dsl.Input[dsl.Artifact],\n use_bq_prefix:\
\ bool = False,\n) -> NamedTuple(\n 'Outputs',\n [\n ('project_id',\
\ str),\n ('dataset_id', str),\n ('table_id', str),\n \
\ ('uri', str),\n ],\n):\n \"\"\"Converts a google.BQTable to a URI.\"\
\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import collections\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n outputs = [\n table.metadata['projectId'],\n table.metadata['datasetId'],\n\
\ table.metadata['tableId'],\n ]\n bq_uri = '.'.join(outputs)\n \
\ if use_bq_prefix:\n bq_uri = 'bq://' + bq_uri\n outputs.append(bq_uri)\n\
\ return collections.namedtuple(\n 'Outputs',\n ['project_id',\
\ 'dataset_id', 'table_id', 'uri'],\n )(*outputs)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-validate-inputs:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- validate_inputs
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef validate_inputs(\n time_column: Optional[str] = None,\n \
\ time_series_identifier_column: Optional[str] = None,\n target_column:\
\ Optional[str] = None,\n data_source_bigquery_table_path: Optional[str]\
\ = None,\n training_fraction: Optional[float] = None,\n validation_fraction:\
\ Optional[float] = None,\n test_fraction: Optional[float] = None,\n\
\ predefined_split_key: Optional[str] = None,\n timestamp_split_key:\
\ Optional[str] = None,\n data_source_csv_filenames: Optional[str] =\
\ None,\n source_model_uri: Optional[str] = None,\n bigquery_destination_uri:\
\ Optional[str] = None,\n window_column: Optional[str] = None,\n window_stride_length:\
\ Optional[int] = None,\n window_max_count: Optional[int] = None,\n \
\ optimization_objective: Optional[str] = None,\n data_granularity_unit:\
\ Optional[str] = None,\n) -> None:\n \"\"\"Checks training pipeline input\
\ parameters are valid.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import re\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n project_pattern = r'([a-z0-9.-]+:)?[a-z][a-z0-9-_]{4,28}[a-z0-9]'\n\
\ dataset_pattern = r'[a-zA-Z0-9_]+'\n table_pattern = r'[^\\.\\:`]+'\n\
\ dataset_uri_pattern = re.compile(\n f'(bq://)?{project_pattern}[.:]{dataset_pattern}')\n\
\ table_uri_pattern = re.compile(\n f'(bq://)?{project_pattern}[.:]{dataset_pattern}[.:]{table_pattern}')\n\
\n # Validate BigQuery column and dataset names.\n bigquery_column_parameters\
\ = [\n time_column,\n time_series_identifier_column,\n target_column,\n\
\ ]\n column_pattern = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]{1,300}')\n \
\ for column in bigquery_column_parameters:\n if column and not column_pattern.fullmatch(column):\n\
\ raise ValueError(f'Invalid column name: {column}.')\n if (bigquery_destination_uri\
\ and\n not dataset_uri_pattern.fullmatch(bigquery_destination_uri)):\n\
\ raise ValueError(\n f'Invalid BigQuery dataset URI: {bigquery_destination_uri}.')\n\
\ if (source_model_uri and not table_uri_pattern.fullmatch(source_model_uri)):\n\
\ raise ValueError(f'Invalid BigQuery table URI: {source_model_uri}.')\n\
\n # Validate data source.\n data_source_count = sum([bool(source) for\
\ source in [\n data_source_bigquery_table_path, data_source_csv_filenames]])\n\
\ if data_source_count > 1:\n raise ValueError(f'Expected 1 data source,\
\ found {data_source_count}.')\n if (data_source_bigquery_table_path\n\
\ and not table_uri_pattern.fullmatch(data_source_bigquery_table_path)):\n\
\ raise ValueError(\n f'Invalid BigQuery table URI: {data_source_bigquery_table_path}.')\n\
\ gcs_path_pattern = re.compile(r'gs:\\/\\/(.+)\\/([^\\/]+)')\n if data_source_csv_filenames:\n\
\ csv_list = [filename.strip()\n for filename in data_source_csv_filenames.split(',')]\n\
\ for gcs_path in csv_list:\n if not gcs_path_pattern.fullmatch(gcs_path):\n\
\ raise ValueError(f'Invalid path to CSV stored in GCS: {gcs_path}.')\n\
\n # Validate split spec.\n fraction_splits = [\n training_fraction,\n\
\ validation_fraction,\n test_fraction,\n ]\n fraction_splits\
\ = [None if fraction == -1 else fraction\n for fraction\
\ in fraction_splits]\n split_count = sum([\n bool(source)\n \
\ for source in [predefined_split_key,\n any(fraction_splits)]\n\
\ ])\n if split_count > 1:\n raise ValueError(f'Expected 1 split type,\
\ found {split_count}.')\n if (predefined_split_key and\n not column_pattern.fullmatch(predefined_split_key)):\n\
\ raise ValueError(f'Invalid column name: {predefined_split_key}.')\n\
\ if any(fraction_splits):\n if not all(fraction_splits):\n raise\
\ ValueError(\n f'All fractions must be non-zero. Got: {fraction_splits}.')\n\
\ if sum(fraction_splits) != 1:\n raise ValueError(\n f'Fraction\
\ splits must sum to 1. Got: {sum(fraction_splits)}.')\n if (timestamp_split_key\
\ and\n not column_pattern.fullmatch(timestamp_split_key)):\n raise\
\ ValueError(f'Invalid column name: {timestamp_split_key}.')\n if timestamp_split_key\
\ and not all(fraction_splits):\n raise ValueError('All fractions must\
\ be non-zero for timestamp split.')\n\n # Validate window config.\n if\
\ window_stride_length == -1:\n window_stride_length = None\n if window_max_count\
\ == -1:\n window_max_count = None\n window_configs = [window_column,\
\ window_stride_length, window_max_count]\n window_config_count = sum([bool(config)\
\ for config in window_configs])\n if window_config_count > 1:\n raise\
\ ValueError(f'Expected 1 window config, found {window_config_count}.')\n\
\ if window_column and not column_pattern.fullmatch(window_column):\n \
\ raise ValueError(f'Invalid column name: {window_column}.')\n if window_stride_length\
\ and (window_stride_length < 1 or\n window_stride_length\
\ > 1000):\n raise ValueError('Stride must be between 1 and 1000. Got:\
\ '\n f'{window_stride_length}.')\n if window_max_count\
\ and (window_max_count < 1000 or\n window_max_count\
\ > int(1e8)):\n raise ValueError('Max count must be between 1000 and\
\ 100000000. Got: '\n f'{window_max_count}.')\n\n #\
\ Validate eval metric.\n valid_optimization_objectives = ['rmse', 'mae',\
\ 'rmsle']\n if optimization_objective:\n if optimization_objective\
\ not in valid_optimization_objectives:\n raise ValueError(\n \
\ 'Optimization objective should be one of the following: '\n \
\ f'{valid_optimization_objectives}, got: {optimization_objective}.')\n\
\n # Validate data granularity unit.\n valid_data_granularity_units =\
\ [\n 'minute', 'hour', 'day', 'week', 'month', 'year']\n if data_granularity_unit:\n\
\ if data_granularity_unit not in valid_data_granularity_units:\n \
\ raise ValueError(\n 'Granularity unit should be one of the\
\ following: '\n f'{valid_data_granularity_units}, got: {data_granularity_unit}.')\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-wrapped-in-list:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- wrapped_in_list
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef wrapped_in_list(value: str) -> List[str]:\n \"\"\"Wraps a string\
\ in a list.\"\"\"\n return [value]\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
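    # The exec-* entries above follow the KFP lightweight-component pattern: the
    # embedded Python source is written to an ephemeral module under a mktemp
    # directory and run via kfp.components.executor_main inside the kfp-v2-base
    # (or component-specific) container image listed for each executor.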
pipelineInfo:
description: Trains a BQML ARIMA_PLUS model.
name: automl-tabular-bqml-arima-train
root:
dag:
outputs:
artifacts:
create-metrics-artifact-evaluation_metrics:
artifactSelectors:
- outputArtifactKey: create-metrics-artifact-evaluation_metrics
producerSubtask: exit-handler-1
tasks:
bigquery-delete-dataset-with-prefix:
cachingOptions: {}
componentRef:
name: comp-bigquery-delete-dataset-with-prefix
dependentTasks:
- exit-handler-1
inputs:
parameters:
dataset_prefix:
runtimeValue:
constant: tmp_{{$.pipeline_job_uuid}}
delete_contents:
runtimeValue:
constant: true
project:
componentInputParameter: project
taskInfo:
name: delete-tmp-dataset
triggerPolicy:
strategy: ALL_UPSTREAM_TASKS_COMPLETED
exit-handler-1:
componentRef:
name: comp-exit-handler-1
inputs:
parameters:
pipelinechannel--bigquery_destination_uri:
componentInputParameter: bigquery_destination_uri
pipelinechannel--data_granularity_unit:
componentInputParameter: data_granularity_unit
pipelinechannel--data_source_bigquery_table_path:
componentInputParameter: data_source_bigquery_table_path
pipelinechannel--data_source_csv_filenames:
componentInputParameter: data_source_csv_filenames
pipelinechannel--encryption_spec_key_name:
componentInputParameter: encryption_spec_key_name
pipelinechannel--forecast_horizon:
componentInputParameter: forecast_horizon
pipelinechannel--location:
componentInputParameter: location
pipelinechannel--max_order:
componentInputParameter: max_order
pipelinechannel--override_destination:
componentInputParameter: override_destination
pipelinechannel--predefined_split_key:
componentInputParameter: predefined_split_key
pipelinechannel--project:
componentInputParameter: project
pipelinechannel--root_dir:
componentInputParameter: root_dir
pipelinechannel--run_evaluation:
componentInputParameter: run_evaluation
pipelinechannel--target_column:
componentInputParameter: target_column
pipelinechannel--test_fraction:
componentInputParameter: test_fraction
pipelinechannel--time_column:
componentInputParameter: time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: time_series_identifier_column
pipelinechannel--timestamp_split_key:
componentInputParameter: timestamp_split_key
pipelinechannel--training_fraction:
componentInputParameter: training_fraction
pipelinechannel--validation_fraction:
componentInputParameter: validation_fraction
pipelinechannel--window_column:
componentInputParameter: window_column
pipelinechannel--window_max_count:
componentInputParameter: window_max_count
pipelinechannel--window_stride_length:
componentInputParameter: window_stride_length
taskInfo:
name: exit-handler-1
inputDefinitions:
parameters:
bigquery_destination_uri:
defaultValue: ''
description: 'URI of the desired destination dataset. If not
specified, resources will be created under a new dataset in the project.
Unlike in Vertex Forecasting, all resources will be given hardcoded names
under this dataset, and the model artifact will also be exported here.'
isOptional: true
parameterType: STRING
data_granularity_unit:
description: 'The data granularity unit. Accepted values are:
minute, hour, day, week, month, year.'
parameterType: STRING
data_source_bigquery_table_path:
defaultValue: ''
        description: 'The BigQuery table path, in the format
          bq://bq_project.bq_dataset.bq_table'
isOptional: true
parameterType: STRING
data_source_csv_filenames:
defaultValue: ''
        description: 'A string that represents a list of comma-separated
          CSV filenames.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: The KMS key name.
isOptional: true
parameterType: STRING
forecast_horizon:
description: 'The number of time periods into the future for which
forecasts will be created. Future periods start after the latest timestamp
for each time series.'
parameterType: NUMBER_INTEGER
location:
description: The GCP region for Vertex AI.
parameterType: STRING
max_order:
defaultValue: 5.0
description: 'Integer between 1 and 5 representing the size of the parameter
search space for ARIMA_PLUS. 5 would result in the highest accuracy model,
but also the longest training runtime.'
isOptional: true
parameterType: NUMBER_INTEGER
override_destination:
defaultValue: false
description: 'Whether to overwrite the metrics and evaluated
examples tables if they already exist. If this is False and the tables
exist, this pipeline will fail.'
isOptional: true
parameterType: BOOLEAN
predefined_split_key:
defaultValue: ''
description: The predefined_split column name.
isOptional: true
parameterType: STRING
project:
description: The GCP project that runs the pipeline components.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
run_evaluation:
defaultValue: true
description: Whether to run evaluation steps during training.
isOptional: true
parameterType: BOOLEAN
target_column:
description: Name of the column that the model is to predict values for.
parameterType: STRING
test_fraction:
defaultValue: -1.0
        description: The test fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
time_column:
description: 'Name of the column that identifies time order in the time
series.'
parameterType: STRING
time_series_identifier_column:
description: 'Name of the column that identifies the time
series.'
parameterType: STRING
timestamp_split_key:
defaultValue: ''
description: The timestamp_split column name.
isOptional: true
parameterType: STRING
training_fraction:
defaultValue: -1.0
description: The training fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
validation_fraction:
defaultValue: -1.0
description: The validation fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
window_column:
defaultValue: ''
description: 'Name of the column that should be used to filter input rows.
The column should contain either booleans or string booleans; if the value
of the row is True, generate a sliding window from that row.'
isOptional: true
parameterType: STRING
window_max_count:
defaultValue: -1.0
description: 'Number of rows that should be used to generate input
examples. If the total row count is larger than this number, the input
data will be randomly sampled to hit the count.'
isOptional: true
parameterType: NUMBER_INTEGER
window_stride_length:
defaultValue: -1.0
description: 'Step length used to generate input examples. Every
window_stride_length rows will be used to generate a sliding window.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
create-metrics-artifact-evaluation_metrics:
artifactType:
schemaTitle: system.Metrics
schemaVersion: 0.0.1
schemaVersion: 2.1.0
sdkVersion: kfp-2.0.0-rc.2
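# Usage sketch (not part of the compiled spec): one way to launch this compiled
# pipeline is with the google-cloud-aiplatform SDK's PipelineJob API. The project,
# region, bucket, file name, and parameter values below are placeholders.
#
#   from google.cloud import aiplatform
#
#   aiplatform.init(project="my-project", location="us-central1")
#   job = aiplatform.PipelineJob(
#       display_name="bqml-arima-train",
#       template_path="automl_tabular_bqml_arima_train_pipeline.yaml",
#       pipeline_root="gs://my-bucket/pipeline_root",
#       parameter_values={
#           "project": "my-project",
#           "location": "us-central1",
#           "root_dir": "gs://my-bucket/pipeline_root",
#           "target_column": "sales",
#           "time_column": "date",
#           "time_series_identifier_column": "store_id",
#           "data_granularity_unit": "day",
#           "forecast_horizon": 30,
#           "data_source_bigquery_table_path": "bq://my-project.my_dataset.my_table",
#       },
#   )
#   job.submit()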
# File: kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/forecasting/prophet_predict_pipeline.yaml
# PIPELINE DEFINITION
# Name: prophet-predict
# Description: Creates a batch prediction using a Prophet model.
# Inputs:
# bigquery_destination_uri: str [Default: '']
# data_source_bigquery_table_path: str [Default: '']
# data_source_csv_filenames: str [Default: '']
# encryption_spec_key_name: str [Default: '']
# location: str
# machine_type: str [Default: 'n1-standard-2']
# max_num_workers: int [Default: 10.0]
# model_name: str
# project: str
# target_column: str
# time_column: str
# time_series_identifier_column: str
components:
comp-bigquery-create-dataset:
executorLabel: exec-bigquery-create-dataset
inputDefinitions:
parameters:
dataset:
parameterType: STRING
exists_ok:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
location:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
comp-bigquery-delete-dataset-with-prefix:
executorLabel: exec-bigquery-delete-dataset-with-prefix
inputDefinitions:
parameters:
dataset_prefix:
parameterType: STRING
delete_contents:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
project:
parameterType: STRING
comp-bigquery-query-job:
executorLabel: exec-bigquery-query-job
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
          description: 'Describes the Cloud
            KMS encryption key that will be used to protect the destination
            BigQuery table. The BigQuery Service Account associated with your
            project requires access to this encryption key. If
            encryption_spec_key_name is specified both here and in
            job_configuration_query, the value specified here will override the
            other one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
          description: 'A JSON-formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
          description: 'Location for creating the BigQuery job. If not
            set, defaults to the `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
          description: 'SQL query text to execute. Only standard SQL is
            supported. If query is specified both here and in
            job_configuration_query, the value specified here will override the
            other one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
          description: 'jobs.query parameters for
            standard SQL queries. If query_parameters are specified both here
            and in job_configuration_query, the values specified here will
            override the other ones.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-bigquery-query-job-2:
executorLabel: exec-bigquery-query-job-2
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
          description: 'Describes the Cloud
            KMS encryption key that will be used to protect the destination
            BigQuery table. The BigQuery Service Account associated with your
            project requires access to this encryption key. If
            encryption_spec_key_name is specified both here and in
            job_configuration_query, the value specified here will override the
            other one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
          description: 'A JSON-formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
          description: 'Location for creating the BigQuery job. If not
            set, defaults to the `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
          description: 'SQL query text to execute. Only standard SQL is
            supported. If query is specified both here and in
            job_configuration_query, the value specified here will override the
            other one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
          description: 'jobs.query parameters for
            standard SQL queries. If query_parameters are specified both here
            and in job_configuration_query, the values specified here will
            override the other ones.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-build-job-configuration-query:
executorLabel: exec-build-job-configuration-query
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-build-job-configuration-query-2:
executorLabel: exec-build-job-configuration-query-2
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-exit-handler-1:
dag:
tasks:
bigquery-create-dataset:
cachingOptions: {}
componentRef:
name: comp-bigquery-create-dataset
dependentTasks:
- get-table-location
- validate-inputs
inputs:
parameters:
dataset:
runtimeValue:
constant: tmp_{{$.pipeline_job_uuid}}
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: create-tmp-dataset
bigquery-query-job:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job
dependentTasks:
- build-job-configuration-query
- get-first-valid
- get-table-location
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
pipelinechannel--get-first-valid-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-first-valid
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n WITH\n base_data AS (\n SELECT\
\ * FROM `{{$.inputs.parameters['pipelinechannel--get-first-valid-Output']}}`\n\
\ )\n SELECT\n CAST({{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\
\ AS STRING) AS {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}},\n\
\ ARRAY_AGG(TIMESTAMP({{$.inputs.parameters['pipelinechannel--time_column']}})\
\ ORDER BY {{$.inputs.parameters['pipelinechannel--time_column']}})\
\ AS {{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ \n \n \n FROM base_data\n GROUP\
\ BY {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\n\
\ "
taskInfo:
name: remove-feature-columns
bigquery-query-job-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job-2
dependentTasks:
- build-job-configuration-query-2
- get-table-location-2
- table-to-uri-2
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query-2
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location-2
pipelinechannel--table-to-uri-2-uri:
taskOutputParameter:
outputParameterKey: uri
producerTask: table-to-uri-2
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n WITH\n predictions AS (\n SELECT\n\
\ {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}},\n\
\ JSON_QUERY_ARRAY(prediction, '$.{{$.inputs.parameters['pipelinechannel--time_column']}}')\
\ AS {{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ JSON_EXTRACT(\n prediction,\n \
\ '$.predicted_on_{{$.inputs.parameters['pipelinechannel--time_column']}}'\n\
\ ) AS predicted_on_{{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ JSON_QUERY_ARRAY(\n prediction,\n \
\ '$.predicted_{{$.inputs.parameters['pipelinechannel--target_column']}}'\n\
\ ) AS predicted_{{$.inputs.parameters['pipelinechannel--target_column']}},\n\
\ FROM `{{$.inputs.parameters['pipelinechannel--table-to-uri-2-uri']}}`\n\
\ )\n SELECT\n {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}},\n\
\ PARSE_TIMESTAMP(\n '\\\"%Y-%m-%dT%H:%M:%SZ\\\
\"',\n predicted_on_{{$.inputs.parameters['pipelinechannel--time_column']}}\n\
\ ) AS predicted_on_{{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ PARSE_TIMESTAMP(\n '\\\"%Y-%m-%dT%H:%M:%SZ\\\
\"',\n {{$.inputs.parameters['pipelinechannel--time_column']}}[SAFE_OFFSET(index)]\n\
\ ) AS {{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ STRUCT(\n CAST(predicted_{{$.inputs.parameters['pipelinechannel--target_column']}}[SAFE_OFFSET(index)]\
\ AS FLOAT64)\n AS value\n ) AS predicted_{{$.inputs.parameters['pipelinechannel--target_column']}}\n\
\ FROM predictions\n CROSS JOIN\n UNNEST(GENERATE_ARRAY(0,\
\ ARRAY_LENGTH({{$.inputs.parameters['pipelinechannel--time_column']}})\
\ - 1)) AS index\n "
taskInfo:
name: create-predictions-table
build-job-configuration-query:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query
dependentTasks:
- bigquery-create-dataset
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}'
pipelinechannel--bigquery-create-dataset-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset
pipelinechannel--bigquery-create-dataset-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}'
table_id:
runtimeValue:
constant: data
write_disposition:
runtimeValue:
constant: WRITE_EMPTY
taskInfo:
name: build-job-configuration-query
build-job-configuration-query-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query-2
dependentTasks:
- table-to-uri-2
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-2-dataset_id'']}}'
pipelinechannel--table-to-uri-2-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: table-to-uri-2
pipelinechannel--table-to-uri-2-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: table-to-uri-2
pipelinechannel--table-to-uri-2-table_id:
taskOutputParameter:
outputParameterKey: table_id
producerTask: table-to-uri-2
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-2-project_id'']}}'
table_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--table-to-uri-2-table_id'']}}'
write_disposition:
runtimeValue:
constant: WRITE_TRUNCATE
taskInfo:
name: build-job-configuration-query-2
get-first-valid:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-first-valid
dependentTasks:
- load-table-from-uri
inputs:
parameters:
pipelinechannel--data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
pipelinechannel--load-table-from-uri-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: load-table-from-uri
values:
runtimeValue:
constant: '["{{$.inputs.parameters[''pipelinechannel--data_source_bigquery_table_path'']}}",
"{{$.inputs.parameters[''pipelinechannel--load-table-from-uri-Output'']}}"]'
taskInfo:
name: get-first-valid
get-table-location:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-table-location
inputs:
parameters:
default_location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
table:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
taskInfo:
name: get-table-location
get-table-location-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-table-location-2
dependentTasks:
- table-to-uri-2
inputs:
parameters:
project:
componentInputParameter: pipelinechannel--project
table:
taskOutputParameter:
outputParameterKey: uri
producerTask: table-to-uri-2
taskInfo:
name: get-table-location-2
load-table-from-uri:
cachingOptions:
enableCache: true
componentRef:
name: comp-load-table-from-uri
dependentTasks:
- bigquery-create-dataset
- get-table-location
inputs:
parameters:
destination:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}.{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}.csv_export'
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
pipelinechannel--bigquery-create-dataset-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset
pipelinechannel--bigquery-create-dataset-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset
project:
componentInputParameter: pipelinechannel--project
source_format:
runtimeValue:
constant: CSV
source_uris:
componentInputParameter: pipelinechannel--data_source_csv_filenames
taskInfo:
name: load-table-from-uri
make-vertex-model-artifact:
cachingOptions:
enableCache: true
componentRef:
name: comp-make-vertex-model-artifact
inputs:
parameters:
location:
componentInputParameter: pipelinechannel--location
model_resource_name:
componentInputParameter: pipelinechannel--model_name
taskInfo:
name: make-vertex-model-artifact
maybe-replace-with-default:
cachingOptions:
enableCache: true
componentRef:
name: comp-maybe-replace-with-default
inputs:
parameters:
default:
componentInputParameter: pipelinechannel--project
value:
componentInputParameter: pipelinechannel--bigquery_destination_uri
taskInfo:
name: maybe-replace-with-default
model-batch-predict:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-batch-predict
dependentTasks:
- make-vertex-model-artifact
- maybe-replace-with-default
- table-to-uri
inputs:
artifacts:
model:
taskOutputArtifact:
outputArtifactKey: vertex_model
producerTask: make-vertex-model-artifact
parameters:
bigquery_destination_output_uri:
runtimeValue:
constant: bq://{{$.inputs.parameters['pipelinechannel--maybe-replace-with-default-Output']}}
bigquery_source_input_uri:
runtimeValue:
constant: bq://{{$.inputs.parameters['pipelinechannel--table-to-uri-uri']}}
instances_format:
runtimeValue:
constant: bigquery
job_display_name:
runtimeValue:
constant: batch-predict-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
location:
componentInputParameter: pipelinechannel--location
machine_type:
componentInputParameter: pipelinechannel--machine_type
max_replica_count:
componentInputParameter: pipelinechannel--max_num_workers
pipelinechannel--maybe-replace-with-default-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: maybe-replace-with-default
pipelinechannel--table-to-uri-uri:
taskOutputParameter:
outputParameterKey: uri
producerTask: table-to-uri
predictions_format:
runtimeValue:
constant: bigquery
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: model-batch-predict
table-to-uri:
cachingOptions:
enableCache: true
componentRef:
name: comp-table-to-uri
dependentTasks:
- bigquery-query-job
inputs:
artifacts:
table:
taskOutputArtifact:
outputArtifactKey: destination_table
producerTask: bigquery-query-job
taskInfo:
name: table-to-uri
table-to-uri-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-table-to-uri-2
dependentTasks:
- model-batch-predict
inputs:
artifacts:
table:
taskOutputArtifact:
outputArtifactKey: bigquery_output_table
producerTask: model-batch-predict
taskInfo:
name: table-to-uri-2
validate-inputs:
cachingOptions:
enableCache: true
componentRef:
name: comp-validate-inputs
inputs:
parameters:
bigquery_destination_uri:
componentInputParameter: pipelinechannel--bigquery_destination_uri
data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
data_source_csv_filenames:
componentInputParameter: pipelinechannel--data_source_csv_filenames
taskInfo:
name: validate-inputs
inputDefinitions:
parameters:
pipelinechannel--bigquery_destination_uri:
parameterType: STRING
pipelinechannel--data_source_bigquery_table_path:
parameterType: STRING
pipelinechannel--data_source_csv_filenames:
parameterType: STRING
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--location:
parameterType: STRING
pipelinechannel--machine_type:
parameterType: STRING
pipelinechannel--max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--model_name:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--time_column:
parameterType: STRING
pipelinechannel--time_series_identifier_column:
parameterType: STRING
comp-get-first-valid:
executorLabel: exec-get-first-valid
inputDefinitions:
parameters:
values:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-get-table-location:
executorLabel: exec-get-table-location
inputDefinitions:
parameters:
default_location:
defaultValue: ''
description: Location to return if no table was given.
isOptional: true
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
table:
description: The BigQuery table to get a location for.
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-get-table-location-2:
executorLabel: exec-get-table-location-2
inputDefinitions:
parameters:
default_location:
defaultValue: ''
description: Location to return if no table was given.
isOptional: true
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
table:
description: The BigQuery table to get a location for.
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-load-table-from-uri:
executorLabel: exec-load-table-from-uri
inputDefinitions:
parameters:
destination:
description: Table into which data is to be loaded.
parameterType: STRING
location:
description: The GCP region.
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
source_format:
defaultValue: CSV
description: 'The file format for the files being imported. Only CSV is
supported.'
isOptional: true
parameterType: STRING
source_uris:
description: 'URIs of data files to be loaded; in format
gs://<bucket_name>/<object_name_or_glob>.'
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-make-vertex-model-artifact:
executorLabel: exec-make-vertex-model-artifact
inputDefinitions:
parameters:
location:
parameterType: STRING
model_resource_name:
parameterType: STRING
outputDefinitions:
artifacts:
vertex_model:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
comp-maybe-replace-with-default:
executorLabel: exec-maybe-replace-with-default
inputDefinitions:
parameters:
default:
defaultValue: ''
isOptional: true
parameterType: STRING
value:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-model-batch-predict:
executorLabel: exec-model-batch-predict
inputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'The Model used to get predictions via this job. Must share
the same
ancestor Location. Starting this job has no impact on any existing
deployments of the Model and their resources. Either this or
`unmanaged_container_model` must be specified.'
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: 'The unmanaged container model used to get predictions via
this job.
This should be used for models that are not uploaded to Vertex. Either
this or model must be specified.'
isOptional: true
parameters:
accelerator_count:
defaultValue: 0.0
description: 'The number of accelerators to attach
to the `machine_type`. Only used if `machine_type` is set. For more
details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: NUMBER_INTEGER
accelerator_type:
defaultValue: ''
description: 'The type of accelerator(s) that may be
attached to the machine as per `accelerator_count`. Only used if
`machine_type` is set. For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
bigquery_destination_output_uri:
defaultValue: ''
description: 'The BigQuery project location where the output is to be written
to. In
the given project a new dataset is created with name
            `prediction_<model-display-name>_<job-create-time>` where <model-display-name> is made
BigQuery-dataset-name compatible (for example, most special characters
become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ
"based on ISO-8601" format. In the dataset two tables will be created,
`predictions`, and `errors`. If the Model has both `instance`
and `prediction` schemata defined then the tables have columns as
follows: The `predictions` table contains instances for which the
prediction succeeded, it has columns as per a concatenation of the
Model''s instance and prediction schemata. The `errors` table
contains rows for which the prediction has failed, it has instance
columns, as per the instance schema, followed by a single "errors"
column, which as values has [google.rpc.Status](Status)
represented as a STRUCT, and containing only `code` and
`message`. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
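        # Illustrative note (comment added for clarity; values are hypothetical, not part
        # of the compiled spec): passing bigquery_destination_output_uri: bq://my-project
        # would make the job create a dataset such as
        # my-project.prediction_my_model_2024_01_01T00_00_00_000Z containing the
        # `predictions` and `errors` tables described above. In this pipeline the value is
        # built from the maybe-replace-with-default task output (see the
        # model-batch-predict task inputs earlier in this file).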
bigquery_source_input_uri:
defaultValue: ''
description: 'BigQuery URI to a table, up to 2000 characters long. For example:
`projectId.bqDatasetId.bqTableId` For more details about this input
config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: 'Customer-managed encryption
key options for a BatchPredictionJob. If this is set, then all
resources created by the BatchPredictionJob will be encrypted with the
provided encryption key. Has the form:
`projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`.
The key needs to be in the same region as where the compute resource
is created.'
isOptional: true
parameterType: STRING
excluded_fields:
defaultValue: []
description: 'Fields that will be excluded in the prediction instance that
is
sent to the Model.
            Excluded fields will be attached to the batch prediction output if
            key_field is not specified.
            When `excluded_fields` is populated, `included_fields` must be empty.
            The input must be JSONL with objects at each line, CSV, BigQuery
            or TfRecord.'
isOptional: true
parameterType: LIST
explanation_metadata:
defaultValue: {}
description: 'Explanation metadata
configuration for this BatchPredictionJob. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_metadata`. All fields of
`explanation_metadata` are optional in the request. If a field of the
`explanation_metadata` object is not populated, the corresponding
field of the `Model.explanation_metadata` object is inherited. For
more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.'
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
description: 'Parameters to configure
explaining for Model''s predictions. Can be specified only if
`generate_explanation` is set to `True`. This value overrides the
value of `Model.explanation_parameters`. All fields of
`explanation_parameters` are optional in the request. If a field of
the `explanation_parameters` object is not populated, the
corresponding field of the `Model.explanation_parameters` object is
inherited. For more details, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.'
isOptional: true
parameterType: STRUCT
gcs_destination_output_uri_prefix:
defaultValue: ''
description: 'The Google Cloud
Storage location of the directory where the output is to be written
to. In the given directory a new directory is created. Its name is
`prediction-<model-display-name>-<job-create-time>`, where timestamp
is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files
`predictions_0001.<extension>`, `predictions_0002.<extension>`,
..., `predictions_N.<extension>` are created where `<extension>`
depends on chosen `predictions_format`, and N may equal 0001 and
depends on the total number of successfully predicted instances. If
the Model has both `instance` and `prediction` schemata defined
then each such file contains predictions as per the
`predictions_format`. If prediction for any instance failed
(partially or completely), then an additional
`errors_0001.<extension>`, `errors_0002.<extension>`,...,
`errors_N.<extension>` files are created (N depends on total number
of failed predictions). These files contain the failed instances, as
per their schema, followed by an additional `error` field which as
value has `google.rpc.Status` containing only `code` and
`message` fields. For more details about this output config, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.'
isOptional: true
parameterType: STRING
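        # Illustrative note (hypothetical values): with
        # gcs_destination_output_uri_prefix: gs://my-bucket/output and
        # predictions_format: jsonl, the job would write files such as
        # gs://my-bucket/output/prediction-<model-display-name>-<job-create-time>/predictions_0001.jsonl,
        # plus errors_*.jsonl files for any failed instances, per the description above.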
gcs_source_uris:
defaultValue: []
description: 'Google Cloud Storage URI(-s) to your instances to run batch
prediction
on. They must match `instances_format`. May contain wildcards. For more
information on wildcards, see [WildcardNames](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames).
For more details about this input config, see [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).'
isOptional: true
parameterType: LIST
generate_explanation:
defaultValue: false
description: 'Generate explanation along with
the batch prediction results. This will cause the batch prediction
output to include explanations based on the `prediction_format`: -
`bigquery`: output includes a column named `explanation`. The value is
a struct that conforms to the [aiplatform.gapic.Explanation] object. -
`jsonl`: The JSON objects on each line include an additional entry
keyed `explanation`. The value of the entry is a JSON object that
conforms to the [aiplatform.gapic.Explanation] object. - `csv`:
Generating explanations for CSV format is not supported. If this
field is set to true, either the Model.explanation_spec or
explanation_metadata and explanation_parameters must be populated.'
isOptional: true
parameterType: BOOLEAN
included_fields:
defaultValue: []
description: 'Fields that will be included in the prediction instance that
is
sent to the Model.
If `instance_type` is `array`, the order of field names in
`included_fields` also determines the order of the values in the array.
When `included_fields` is populated, `excluded_fields` must be empty.
The input must be JSONL with objects at each line, CSV, BigQuery
or TfRecord.'
isOptional: true
parameterType: LIST
instance_type:
defaultValue: ''
description: "The format of the instance that the Model\naccepts. Vertex\
\ AI will convert compatible\n[InstancesFormat](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\n\
to the specified format. Supported values are:\n`object`: Each input is\
\ converted to JSON object format.\n * For `bigquery`, each row is converted\
\ to an object.\n * For `jsonl`, each line of the JSONL input must be\
\ an object.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\
\ `tf-record-gzip`.\n`array`: Each input is converted to JSON array format.\n\
\ * For `bigquery`, each row is converted to an array. The order\n \
\ of columns is determined by the BigQuery column order, unless\n \
\ [included_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig)\
\ is populated.\n `included_fields` must be populated for specifying\
\ field orders.\n * For `jsonl`, if each line of the JSONL input is an\
\ object,\n `included_fields` must be populated for specifying field\
\ orders.\n * Does not apply to `csv`, `file-list`, `tf-record`, or\n\
\ `tf-record-gzip`.\nIf not specified, Vertex AI converts the batch\
\ prediction input as\nfollows:\n * For `bigquery` and `csv`, the behavior\
\ is the same as `array`. The\n order of columns is the same as defined\
\ in the file or table, unless\n included_fields is populated.\n * For\
\ `jsonl`, the prediction instance format is determined by\n each line\
\ of the input.\n * For `tf-record`/`tf-record-gzip`, each record will\
\ be converted to\n an object in the format of `{\"b64\": <value>}`,\
\ where `<value>` is\n the Base64-encoded string of the content of the\
\ record.\n * For `file-list`, each file in the list will be converted\
\ to an\n object in the format of `{\"b64\": <value>}`, where `<value>`\
\ is\n the Base64-encoded string of the content of the file."
isOptional: true
parameterType: STRING
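        # Illustrative note (hypothetical row, not from this spec): a BigQuery row with
        # columns a=1, b="x" is sent to the model as
        #   {"a": 1, "b": "x"}  when instance_type is `object`
        #   [1, "x"]            when instance_type is `array` (BigQuery column order,
        #                       unless included_fields overrides it), as described above.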
instances_format:
defaultValue: jsonl
description: 'The format in which instances are
given, must be one of the [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models)''s
supportedInputStorageFormats.
For more details about this input config, see
[InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.)'
isOptional: true
parameterType: STRING
job_display_name:
description: The user-defined name of this BatchPredictionJob.
parameterType: STRING
key_field:
defaultValue: ''
description: "The name of the field that is considered as a key.\nThe values\
            \ identified by the key field are not included in the\ntransformed instances\
            \ that are sent to the Model. This is similar to\nspecifying this name\
\ of the field in [excluded_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).\
\ In addition,\nthe batch prediction output will not include the instances.\
\ Instead the\noutput will only include the value of the key field, in\
\ a field named\n`key` in the output:\n * For `jsonl` output format, the\
\ output will have a `key` field\n instead of the `instance` field.\n\
            \ * For `csv`/`bigquery` output format, the output will have a `key`\n\
\ column instead of the instance feature columns.\nThe input must be\
\ JSONL with objects at each line, CSV, BigQuery\nor TfRecord."
isOptional: true
parameterType: STRING
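        # Illustrative note (hypothetical): with key_field: row_id and
        # predictions_format: bigquery, the output table would contain a `key` column
        # holding each row_id value in place of the instance feature columns, as
        # described above.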
labels:
defaultValue: {}
description: 'The labels with user-defined metadata to
organize your BatchPredictionJobs. Label keys and values can be no
longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for
more information and examples of labels.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: Location for creating the BatchPredictionJob.
isOptional: true
parameterType: STRING
machine_type:
defaultValue: ''
description: 'The type of machine for running batch
prediction on dedicated resources. If the Model supports
DEDICATED_RESOURCES this config may be provided (and the job will use
these resources). If the Model doesn''t support AUTOMATIC_RESOURCES,
this config must be provided. For more details about the
BatchDedicatedResources, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources.
For more details about the machine spec, see
https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec'
isOptional: true
parameterType: STRING
manual_batch_tuning_parameters_batch_size:
defaultValue: 0.0
description: 'The number of
the records (e.g. instances) of the operation given in each batch to a
machine replica. Machine type, and size of a single record should be
considered when setting this parameter, higher value speeds up the
batch operation''s execution, but too high value will result in a whole
batch not fitting in a machine''s memory, and the whole operation will
fail.'
isOptional: true
parameterType: NUMBER_INTEGER
max_replica_count:
defaultValue: 0.0
description: 'The maximum number of machine replicas the batch operation
may be scaled
to. Only used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
model_parameters:
defaultValue: {}
            description: The parameters that govern the predictions. The schema of the
              parameters may be specified via the Model's `parameters_schema_uri`.
isOptional: true
parameterType: STRUCT
predictions_format:
defaultValue: jsonl
description: 'The format in which Vertex AI gives the predictions. Must
be one of the
Model''s supportedOutputStorageFormats.
For more details about this output config, see [OutputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig).'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to create the BatchPredictionJob. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
starting_replica_count:
defaultValue: 0.0
description: 'The number of machine replicas
used at the start of the batch operation. If not set, Vertex AI
decides starting number, not greater than `max_replica_count`. Only
used if `machine_type` is set.'
isOptional: true
parameterType: NUMBER_INTEGER
outputDefinitions:
artifacts:
batchpredictionjob:
artifactType:
schemaTitle: google.VertexBatchPredictionJob
schemaVersion: 0.0.1
description: '[**Deprecated. Use gcs_output_directory and bigquery_output_table
instead.**] Artifact
representation of the created batch prediction job.'
bigquery_output_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
            bigquery_destination_output_uri is specified.'
gcs_output_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: 'Artifact tracking the batch prediction job output. This is
only
available if
gcs_destination_output_uri_prefix is specified.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the batch prediction
job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-table-to-uri:
executorLabel: exec-table-to-uri
inputDefinitions:
artifacts:
table:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
use_bq_prefix:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
table_id:
parameterType: STRING
uri:
parameterType: STRING
comp-table-to-uri-2:
executorLabel: exec-table-to-uri-2
inputDefinitions:
artifacts:
table:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
use_bq_prefix:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
table_id:
parameterType: STRING
uri:
parameterType: STRING
comp-validate-inputs:
executorLabel: exec-validate-inputs
inputDefinitions:
parameters:
bigquery_destination_uri:
isOptional: true
parameterType: STRING
data_granularity_unit:
isOptional: true
parameterType: STRING
data_source_bigquery_table_path:
isOptional: true
parameterType: STRING
data_source_csv_filenames:
isOptional: true
parameterType: STRING
optimization_objective:
isOptional: true
parameterType: STRING
predefined_split_key:
isOptional: true
parameterType: STRING
source_model_uri:
isOptional: true
parameterType: STRING
target_column:
isOptional: true
parameterType: STRING
test_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
time_column:
isOptional: true
parameterType: STRING
time_series_identifier_column:
isOptional: true
parameterType: STRING
timestamp_split_key:
isOptional: true
parameterType: STRING
training_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
validation_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
window_column:
isOptional: true
parameterType: STRING
window_max_count:
isOptional: true
parameterType: NUMBER_INTEGER
window_stride_length:
isOptional: true
parameterType: NUMBER_INTEGER
deploymentSpec:
executors:
exec-bigquery-create-dataset:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_create_dataset
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_create_dataset(\n project: str,\n location: str,\n\
\ dataset: str,\n exists_ok: bool = False,\n) -> NamedTuple('Outputs',\
\ [('project_id', str), ('dataset_id', str)]):\n \"\"\"Creates a BigQuery\
\ dataset.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import collections\n\n from google.cloud import bigquery\n # pylint:\
\ enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n ref\
\ = client.create_dataset(dataset=dataset, exists_ok=exists_ok)\n return\
\ collections.namedtuple('Outputs', ['project_id', 'dataset_id'])(\n \
\ ref.project, ref.dataset_id)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-delete-dataset-with-prefix:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_delete_dataset_with_prefix
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_delete_dataset_with_prefix(\n project: str,\n \
\ dataset_prefix: str,\n delete_contents: bool = False,\n) -> None:\n\
\ \"\"\"Deletes all BigQuery datasets matching the given prefix.\"\"\"\n\
\ # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project)\n for dataset in client.list_datasets(project=project):\n\
\ if dataset.dataset_id.startswith(dataset_prefix):\n client.delete_dataset(\n\
\ dataset=dataset.dataset_id,\n delete_contents=delete_contents)\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-query-job:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-bigquery-query-job-2:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-build-job-configuration-query:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-build-job-configuration-query-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-first-valid:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_first_valid
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_first_valid(values: str) -> str:\n \"\"\"Returns the first\
\ truthy value from the given serialized JSON list.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n for value in json.loads(values):\n if value:\n return value\n\
\ raise ValueError('No valid values.')\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-table-location:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_table_location
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_table_location(\n project: str,\n table: Optional[str],\n\
\ default_location: str = '',\n) -> str:\n \"\"\"Returns the region\
\ the given table belongs to.\n\n Args:\n project: The GCP project.\n\
\ table: The BigQuery table to get a location for.\n default_location:\
\ Location to return if no table was given.\n\n Returns:\n A GCP region\
\ or multi-region.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n if not table:\n return default_location\n\n client = bigquery.Client(project=project)\n\
\ if table.startswith('bq://'):\n table = table[len('bq://'):]\n elif\
\ table.startswith('bigquery://'):\n table = table[len('bigquery://'):]\n\
\ return client.get_table(table).location\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
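    # Rough usage sketch of the component above (hypothetical values, run outside the
    # pipeline): get_table_location(project='my-project',
    # table='bq://my-project.my_dataset.my_table') strips the bq:// prefix and returns
    # client.get_table('my-project.my_dataset.my_table').location, e.g. 'US'; with an
    # empty table it simply returns default_location.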
exec-get-table-location-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_table_location
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_table_location(\n project: str,\n table: Optional[str],\n\
\ default_location: str = '',\n) -> str:\n \"\"\"Returns the region\
\ the given table belongs to.\n\n Args:\n project: The GCP project.\n\
\ table: The BigQuery table to get a location for.\n default_location:\
\ Location to return if no table was given.\n\n Returns:\n A GCP region\
\ or multi-region.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n if not table:\n return default_location\n\n client = bigquery.Client(project=project)\n\
\ if table.startswith('bq://'):\n table = table[len('bq://'):]\n elif\
\ table.startswith('bigquery://'):\n table = table[len('bigquery://'):]\n\
\ return client.get_table(table).location\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-load-table-from-uri:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- load_table_from_uri
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef load_table_from_uri(\n project: str,\n location: str,\n\
\ source_uris: str,\n destination: str,\n source_format: str =\
\ 'CSV',\n) -> str:\n \"\"\"Creates a table from a list of URIs.\n\n Args:\n\
\ project: The GCP project.\n location: The GCP region.\n source_uris:\
\ URIs of data files to be loaded; in format\n gs://<bucket_name>/<object_name_or_glob>.\n\
\ destination: Table into which data is to be loaded.\n source_format:\
\ The file format for the files being imported. Only CSV is\n supported.\n\
\n Returns:\n The destination table containing imported data.\n \"\"\
\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n if not source_uris:\n return ''\n\n csv_list = [filename.strip()\
\ for filename in source_uris.split(',')]\n client = bigquery.Client(project=project,\
\ location=location)\n job_config = bigquery.LoadJobConfig(\n autodetect=True,\
\ source_format=source_format)\n client.load_table_from_uri(\n source_uris=csv_list,\n\
\ destination=destination,\n project=project,\n location=location,\n\
\ job_config=job_config).result()\n return destination\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
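    # Rough usage sketch (hypothetical values): load_table_from_uri(project='my-project',
    # location='US', source_uris='gs://my-bucket/a.csv, gs://my-bucket/b.csv',
    # destination='my-project.tmp_dataset.data') splits the comma-separated string,
    # runs a BigQuery load job with autodetect=True and source_format='CSV', and
    # returns the destination string unchanged (or '' when source_uris is empty).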
exec-make-vertex-model-artifact:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- make_vertex_model_artifact
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef make_vertex_model_artifact(\n location: str,\n model_resource_name:\
\ str,\n vertex_model: dsl.Output[dsl.Artifact],\n) -> None:\n \"\"\"\
Creates a google.VertexModel artifact.\"\"\"\n vertex_model.metadata =\
\ {'resourceName': model_resource_name}\n vertex_model.uri = (f'https://{location}-aiplatform.googleapis.com'\n\
\ f'/v1/{model_resource_name}')\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-maybe-replace-with-default:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- maybe_replace_with_default
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef maybe_replace_with_default(value: str, default: str = '') ->\
          \ str:\n  \"\"\"Returns the default if the given value is empty.\"\"\"\
\n return default if not value else value\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
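    # Behaviour sketch of the component above ('foo'/'bar' are hypothetical values):
    # maybe_replace_with_default('', default='foo') returns 'foo', while
    # maybe_replace_with_default('bar', default='foo') returns 'bar'. Its output feeds
    # the bq:// destination URI of the model-batch-predict task earlier in this file.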
exec-model-batch-predict:
container:
args:
- --type
- BatchPredictionJob
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''job_display_name'']}}",
"\", ", {"IfPresent": {"InputName": "model", "Then": {"Concat": ["\"model\":
\"", "{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}", "\","]}}},
" \"input_config\": {", "\"instances_format\": \"", "{{$.inputs.parameters[''instances_format'']}}",
"\"", ", \"gcs_source\": {", "\"uris\":", "{{$.inputs.parameters[''gcs_source_uris'']}}",
"}", ", \"bigquery_source\": {", "\"input_uri\": \"", "{{$.inputs.parameters[''bigquery_source_input_uri'']}}",
"\"", "}", "}", ", \"instance_config\": {", "\"instance_type\": \"", "{{$.inputs.parameters[''instance_type'']}}",
"\"", ", \"key_field\": \"", "{{$.inputs.parameters[''key_field'']}}", "\"
", {"IfPresent": {"InputName": "included_fields", "Then": {"Concat": [",
\"included_fields\": ", "{{$.inputs.parameters[''included_fields'']}}"]}}},
{"IfPresent": {"InputName": "excluded_fields", "Then": {"Concat": [", \"excluded_fields\":
", "{{$.inputs.parameters[''excluded_fields'']}}"]}}}, "}", ", \"model_parameters\":
", "{{$.inputs.parameters[''model_parameters'']}}", ", \"output_config\":
{", "\"predictions_format\": \"", "{{$.inputs.parameters[''predictions_format'']}}",
"\"", ", \"gcs_destination\": {", "\"output_uri_prefix\": \"", "{{$.inputs.parameters[''gcs_destination_output_uri_prefix'']}}",
"\"", "}", ", \"bigquery_destination\": {", "\"output_uri\": \"", "{{$.inputs.parameters[''bigquery_destination_output_uri'']}}",
"\"", "}", "}", ", \"dedicated_resources\": {", "\"machine_spec\": {", "\"machine_type\":
\"", "{{$.inputs.parameters[''machine_type'']}}", "\"", ", \"accelerator_type\":
\"", "{{$.inputs.parameters[''accelerator_type'']}}", "\"", ", \"accelerator_count\":
", "{{$.inputs.parameters[''accelerator_count'']}}", "}", ", \"starting_replica_count\":
", "{{$.inputs.parameters[''starting_replica_count'']}}", ", \"max_replica_count\":
", "{{$.inputs.parameters[''max_replica_count'']}}", "}", ", \"manual_batch_tuning_parameters\":
{", "\"batch_size\": ", "{{$.inputs.parameters[''manual_batch_tuning_parameters_batch_size'']}}",
"}", ", \"generate_explanation\": ", "{{$.inputs.parameters[''generate_explanation'']}}",
", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"encryption_spec\":
{\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
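    # The --payload argument above is assembled at runtime from the "Concat"/"IfPresent"
    # placeholders. A minimal sketch of the resulting JSON for this pipeline
    # (hypothetical values, heavily truncated):
    #   {"display_name": "batch-predict-<uuid>-<uuid>",
    #    "model": "projects/123/locations/us-central1/models/456",
    #    "input_config": {"instances_format": "bigquery",
    #                     "bigquery_source": {"input_uri": "bq://my-project.tmp.data"}},
    #    "output_config": {"predictions_format": "bigquery",
    #                      "bigquery_destination": {"output_uri": "bq://my-project.output"}},
    #    "dedicated_resources": {"machine_spec": {"machine_type": "n1-standard-2"}, ...}}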
exec-table-to-uri:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- table_to_uri
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef table_to_uri(\n table: dsl.Input[dsl.Artifact],\n use_bq_prefix:\
\ bool = False,\n) -> NamedTuple(\n 'Outputs',\n [\n ('project_id',\
\ str),\n ('dataset_id', str),\n ('table_id', str),\n \
\ ('uri', str),\n ],\n):\n \"\"\"Converts a google.BQTable to a URI.\"\
\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import collections\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n outputs = [\n table.metadata['projectId'],\n table.metadata['datasetId'],\n\
\ table.metadata['tableId'],\n ]\n bq_uri = '.'.join(outputs)\n \
\ if use_bq_prefix:\n bq_uri = 'bq://' + bq_uri\n outputs.append(bq_uri)\n\
\ return collections.namedtuple(\n 'Outputs',\n ['project_id',\
\ 'dataset_id', 'table_id', 'uri'],\n )(*outputs)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
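    # Behaviour sketch of the component above (hypothetical metadata): for a
    # google.BQTable artifact with metadata {'projectId': 'my-project',
    # 'datasetId': 'my_dataset', 'tableId': 'predictions'} it returns
    # ('my-project', 'my_dataset', 'predictions', 'my-project.my_dataset.predictions');
    # with use_bq_prefix=True the uri becomes 'bq://my-project.my_dataset.predictions'.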
exec-table-to-uri-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- table_to_uri
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef table_to_uri(\n table: dsl.Input[dsl.Artifact],\n use_bq_prefix:\
\ bool = False,\n) -> NamedTuple(\n 'Outputs',\n [\n ('project_id',\
\ str),\n ('dataset_id', str),\n ('table_id', str),\n \
\ ('uri', str),\n ],\n):\n \"\"\"Converts a google.BQTable to a URI.\"\
\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import collections\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n outputs = [\n table.metadata['projectId'],\n table.metadata['datasetId'],\n\
\ table.metadata['tableId'],\n ]\n bq_uri = '.'.join(outputs)\n \
\ if use_bq_prefix:\n bq_uri = 'bq://' + bq_uri\n outputs.append(bq_uri)\n\
\ return collections.namedtuple(\n 'Outputs',\n ['project_id',\
\ 'dataset_id', 'table_id', 'uri'],\n )(*outputs)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-validate-inputs:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- validate_inputs
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef validate_inputs(\n time_column: Optional[str] = None,\n \
\ time_series_identifier_column: Optional[str] = None,\n target_column:\
\ Optional[str] = None,\n data_source_bigquery_table_path: Optional[str]\
\ = None,\n training_fraction: Optional[float] = None,\n validation_fraction:\
\ Optional[float] = None,\n test_fraction: Optional[float] = None,\n\
\ predefined_split_key: Optional[str] = None,\n timestamp_split_key:\
\ Optional[str] = None,\n data_source_csv_filenames: Optional[str] =\
\ None,\n source_model_uri: Optional[str] = None,\n bigquery_destination_uri:\
\ Optional[str] = None,\n window_column: Optional[str] = None,\n window_stride_length:\
\ Optional[int] = None,\n window_max_count: Optional[int] = None,\n \
\ optimization_objective: Optional[str] = None,\n data_granularity_unit:\
\ Optional[str] = None,\n) -> None:\n \"\"\"Checks training pipeline input\
\ parameters are valid.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import re\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n project_pattern = r'([a-z0-9.-]+:)?[a-z][a-z0-9-_]{4,28}[a-z0-9]'\n\
\ dataset_pattern = r'[a-zA-Z0-9_]+'\n table_pattern = r'[^\\.\\:`]+'\n\
\ dataset_uri_pattern = re.compile(\n f'(bq://)?{project_pattern}[.:]{dataset_pattern}')\n\
\ table_uri_pattern = re.compile(\n f'(bq://)?{project_pattern}[.:]{dataset_pattern}[.:]{table_pattern}')\n\
\n # Validate BigQuery column and dataset names.\n bigquery_column_parameters\
\ = [\n time_column,\n time_series_identifier_column,\n target_column,\n\
\ ]\n column_pattern = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]{1,300}')\n \
\ for column in bigquery_column_parameters:\n if column and not column_pattern.fullmatch(column):\n\
\ raise ValueError(f'Invalid column name: {column}.')\n if (bigquery_destination_uri\
\ and\n not dataset_uri_pattern.fullmatch(bigquery_destination_uri)):\n\
\ raise ValueError(\n f'Invalid BigQuery dataset URI: {bigquery_destination_uri}.')\n\
\ if (source_model_uri and not table_uri_pattern.fullmatch(source_model_uri)):\n\
\ raise ValueError(f'Invalid BigQuery table URI: {source_model_uri}.')\n\
\n # Validate data source.\n data_source_count = sum([bool(source) for\
\ source in [\n data_source_bigquery_table_path, data_source_csv_filenames]])\n\
\ if data_source_count > 1:\n raise ValueError(f'Expected 1 data source,\
\ found {data_source_count}.')\n if (data_source_bigquery_table_path\n\
\ and not table_uri_pattern.fullmatch(data_source_bigquery_table_path)):\n\
\ raise ValueError(\n f'Invalid BigQuery table URI: {data_source_bigquery_table_path}.')\n\
\ gcs_path_pattern = re.compile(r'gs:\\/\\/(.+)\\/([^\\/]+)')\n if data_source_csv_filenames:\n\
\ csv_list = [filename.strip()\n for filename in data_source_csv_filenames.split(',')]\n\
\ for gcs_path in csv_list:\n if not gcs_path_pattern.fullmatch(gcs_path):\n\
\ raise ValueError(f'Invalid path to CSV stored in GCS: {gcs_path}.')\n\
\n # Validate split spec.\n fraction_splits = [\n training_fraction,\n\
\ validation_fraction,\n test_fraction,\n ]\n fraction_splits\
\ = [None if fraction == -1 else fraction\n for fraction\
\ in fraction_splits]\n split_count = sum([\n bool(source)\n \
\ for source in [predefined_split_key,\n any(fraction_splits)]\n\
\ ])\n if split_count > 1:\n raise ValueError(f'Expected 1 split type,\
\ found {split_count}.')\n if (predefined_split_key and\n not column_pattern.fullmatch(predefined_split_key)):\n\
\ raise ValueError(f'Invalid column name: {predefined_split_key}.')\n\
\ if any(fraction_splits):\n if not all(fraction_splits):\n raise\
\ ValueError(\n f'All fractions must be non-zero. Got: {fraction_splits}.')\n\
\ if sum(fraction_splits) != 1:\n raise ValueError(\n f'Fraction\
\ splits must sum to 1. Got: {sum(fraction_splits)}.')\n if (timestamp_split_key\
\ and\n not column_pattern.fullmatch(timestamp_split_key)):\n raise\
\ ValueError(f'Invalid column name: {timestamp_split_key}.')\n if timestamp_split_key\
\ and not all(fraction_splits):\n raise ValueError('All fractions must\
\ be non-zero for timestamp split.')\n\n # Validate window config.\n if\
\ window_stride_length == -1:\n window_stride_length = None\n if window_max_count\
\ == -1:\n window_max_count = None\n window_configs = [window_column,\
\ window_stride_length, window_max_count]\n window_config_count = sum([bool(config)\
\ for config in window_configs])\n if window_config_count > 1:\n raise\
\ ValueError(f'Expected 1 window config, found {window_config_count}.')\n\
\ if window_column and not column_pattern.fullmatch(window_column):\n \
\ raise ValueError(f'Invalid column name: {window_column}.')\n if window_stride_length\
\ and (window_stride_length < 1 or\n window_stride_length\
\ > 1000):\n raise ValueError('Stride must be between 1 and 1000. Got:\
\ '\n f'{window_stride_length}.')\n if window_max_count\
\ and (window_max_count < 1000 or\n window_max_count\
\ > int(1e8)):\n raise ValueError('Max count must be between 1000 and\
\ 100000000. Got: '\n f'{window_max_count}.')\n\n #\
\ Validate eval metric.\n valid_optimization_objectives = ['rmse', 'mae',\
\ 'rmsle']\n if optimization_objective:\n if optimization_objective\
\ not in valid_optimization_objectives:\n raise ValueError(\n \
\ 'Optimization objective should be one of the following: '\n \
\ f'{valid_optimization_objectives}, got: {optimization_objective}.')\n\
\n # Validate data granularity unit.\n valid_data_granularity_units =\
\ [\n 'minute', 'hour', 'day', 'week', 'month', 'year']\n if data_granularity_unit:\n\
\ if data_granularity_unit not in valid_data_granularity_units:\n \
\ raise ValueError(\n 'Granularity unit should be one of the\
\ following: '\n f'{valid_data_granularity_units}, got: {data_granularity_unit}.')\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
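    # Validation sketch for the component above (hypothetical values): a
    # data_source_bigquery_table_path of 'bq://my-project.my_dataset.my_table' matches
    # the table URI pattern and passes, while a bare 'my_table' raises
    # ValueError('Invalid BigQuery table URI: my_table.'); supplying both a BigQuery
    # path and data_source_csv_filenames raises
    # ValueError('Expected 1 data source, found 2.').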
pipelineInfo:
description: Creates a batch prediction using a Prophet model.
name: prophet-predict
root:
dag:
tasks:
bigquery-delete-dataset-with-prefix:
cachingOptions: {}
componentRef:
name: comp-bigquery-delete-dataset-with-prefix
dependentTasks:
- exit-handler-1
inputs:
parameters:
dataset_prefix:
runtimeValue:
constant: tmp_{{$.pipeline_job_uuid}}
delete_contents:
runtimeValue:
constant: true
project:
componentInputParameter: project
taskInfo:
name: delete-tmp-dataset
triggerPolicy:
strategy: ALL_UPSTREAM_TASKS_COMPLETED
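      # Explanatory comment (not emitted by the KFP compiler): the
      # ALL_UPSTREAM_TASKS_COMPLETED strategy makes this cleanup task run once
      # exit-handler-1 finishes, whether the wrapped tasks succeeded or failed, so the
      # tmp_{{$.pipeline_job_uuid}} dataset is always deleted.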
exit-handler-1:
componentRef:
name: comp-exit-handler-1
inputs:
parameters:
pipelinechannel--bigquery_destination_uri:
componentInputParameter: bigquery_destination_uri
pipelinechannel--data_source_bigquery_table_path:
componentInputParameter: data_source_bigquery_table_path
pipelinechannel--data_source_csv_filenames:
componentInputParameter: data_source_csv_filenames
pipelinechannel--encryption_spec_key_name:
componentInputParameter: encryption_spec_key_name
pipelinechannel--location:
componentInputParameter: location
pipelinechannel--machine_type:
componentInputParameter: machine_type
pipelinechannel--max_num_workers:
componentInputParameter: max_num_workers
pipelinechannel--model_name:
componentInputParameter: model_name
pipelinechannel--project:
componentInputParameter: project
pipelinechannel--target_column:
componentInputParameter: target_column
pipelinechannel--time_column:
componentInputParameter: time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: time_series_identifier_column
taskInfo:
name: exit-handler-1
inputDefinitions:
parameters:
bigquery_destination_uri:
defaultValue: ''
description: 'URI of the desired destination dataset. If not
specified, resources will be created under a new dataset in the project.
Unlike in Vertex Forecasting, all resources will be given hardcoded names
under this dataset, and the model artifact will also be exported here.'
isOptional: true
parameterType: STRING
data_source_bigquery_table_path:
defaultValue: ''
description: 'The BigQuery table path of format
bq://bq_project.bq_dataset.bq_table'
isOptional: true
parameterType: STRING
data_source_csv_filenames:
defaultValue: ''
description: 'A string that represents a list of comma
separated CSV filenames.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: The KMS key name.
isOptional: true
parameterType: STRING
location:
description: The GCP region for Vertex AI.
parameterType: STRING
machine_type:
defaultValue: n1-standard-2
description: The machine type used for batch prediction.
isOptional: true
parameterType: STRING
max_num_workers:
defaultValue: 10.0
description: The max number of workers used for batch prediction.
isOptional: true
parameterType: NUMBER_INTEGER
model_name:
      description: 'The name of the Model resource, in the form of
projects/{project}/locations/{location}/models/{model}.'
parameterType: STRING
project:
description: The GCP project that runs the pipeline components.
parameterType: STRING
target_column:
description: Name of the column that the model is to predict values for.
parameterType: STRING
time_column:
description: 'Name of the column that identifies time order in the time
series.'
parameterType: STRING
time_series_identifier_column:
description: 'Name of the column that identifies the time
series.'
parameterType: STRING
schemaVersion: 2.1.0
sdkVersion: kfp-2.0.0-rc.2
| 834 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer_pipeline.yaml | # PIPELINE DEFINITION
# Name: prophet-train
# Description: Trains one Prophet model per time series.
# Inputs:
# data_granularity_unit: str
# data_source_bigquery_table_path: str [Default: '']
# data_source_csv_filenames: str [Default: '']
# dataflow_service_account: str [Default: '']
# dataflow_subnetwork: str [Default: '']
# dataflow_use_public_ips: bool [Default: True]
# encryption_spec_key_name: str [Default: '']
# evaluation_dataflow_disk_size_gb: int [Default: 40.0]
# evaluation_dataflow_machine_type: str [Default: 'n1-standard-1']
# evaluation_dataflow_max_num_workers: int [Default: 10.0]
# forecast_horizon: int
# location: str
# max_num_trials: int [Default: 6.0]
# optimization_objective: str
# predefined_split_key: str [Default: '']
# project: str
# root_dir: str
# run_evaluation: bool [Default: True]
# target_column: str
# test_fraction: float [Default: -1.0]
# time_column: str
# time_series_identifier_column: str
# timestamp_split_key: str [Default: '']
# trainer_dataflow_disk_size_gb: int [Default: 40.0]
# trainer_dataflow_machine_type: str [Default: 'n1-standard-1']
# trainer_dataflow_max_num_workers: int [Default: 10.0]
# training_fraction: float [Default: -1.0]
# validation_fraction: float [Default: -1.0]
# window_column: str [Default: '']
# window_max_count: int [Default: -1.0]
# window_stride_length: int [Default: -1.0]
components:
comp-bigquery-create-dataset:
executorLabel: exec-bigquery-create-dataset
inputDefinitions:
parameters:
dataset:
parameterType: STRING
exists_ok:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
location:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
comp-bigquery-delete-dataset-with-prefix:
executorLabel: exec-bigquery-delete-dataset-with-prefix
inputDefinitions:
parameters:
dataset_prefix:
parameterType: STRING
delete_contents:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
project:
parameterType: STRING
comp-bigquery-query-job:
executorLabel: exec-bigquery-query-job
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
description: 'Describes the Cloud
KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your
            project requires access to this encryption key. If
            encryption_spec_key_name is specified both here and in
            job_configuration_query, the value specified here will override the
            other one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
description: 'A json formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
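        # Illustrative note (hypothetical values): the build-job-configuration-query
        # component used by this pipeline produces a struct such as
        #   {"priority": "INTERACTIVE",
        #    "destinationTable": {"projectId": "my-project",
        #                         "datasetId": "tmp_dataset",
        #                         "tableId": "data"},
        #    "write_disposition": "WRITE_EMPTY"}
        # which is passed in here as job_configuration_query.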
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Location for creating the BigQuery job. If not
            set, defaults to the `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
description: 'SQL query text to execute. Only standard SQL is
            supported. If query is specified both here and in
            job_configuration_query, the value specified here will override the
            other one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
description: 'jobs.query parameters for
            standard SQL queries. If query_parameters is specified both here and
            in job_configuration_query, the value specified here will override
            the other one.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-build-job-configuration-query:
executorLabel: exec-build-job-configuration-query
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-condition-2:
dag:
tasks:
model-evaluation-regression:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-evaluation-regression
inputs:
artifacts:
predictions_gcs_source:
componentInputArtifact: pipelinechannel--prophet-trainer-evaluated_examples_directory
parameters:
dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
dataflow_max_workers_num:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
ground_truth_gcs_source:
runtimeValue:
constant: []
location:
componentInputParameter: pipelinechannel--location
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
prediction_score_column:
runtimeValue:
constant: prediction.predicted_{{$.inputs.parameters['pipelinechannel--target_column']}}
predictions_format:
runtimeValue:
constant: jsonl
project:
componentInputParameter: pipelinechannel--project
target_field_name:
componentInputParameter: pipelinechannel--target_column
taskInfo:
name: model-evaluation-regression
inputDefinitions:
artifacts:
pipelinechannel--prophet-trainer-evaluated_examples_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--location:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--run_evaluation:
parameterType: BOOLEAN
pipelinechannel--target_column:
parameterType: STRING
comp-exit-handler-1:
dag:
tasks:
bigquery-create-dataset:
cachingOptions: {}
componentRef:
name: comp-bigquery-create-dataset
dependentTasks:
- get-table-location
- validate-inputs
inputs:
parameters:
dataset:
runtimeValue:
constant: tmp_{{$.pipeline_job_uuid}}
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: create-tmp-dataset
bigquery-query-job:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job
dependentTasks:
- bigquery-create-dataset
- build-job-configuration-query
- get-fte-suffix
- get-table-location
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
pipelinechannel--bigquery-create-dataset-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset
pipelinechannel--bigquery-create-dataset-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset
pipelinechannel--get-fte-suffix-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-fte-suffix
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
pipelinechannel--time_column:
componentInputParameter: pipelinechannel--time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n WITH\n base_data AS (\n SELECT\
\ * FROM `{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-project_id']}}.{{$.inputs.parameters['pipelinechannel--bigquery-create-dataset-dataset_id']}}.fte_time_series_output_{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}`\n\
\ )\n SELECT\n CAST({{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\
\ AS STRING) AS {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}},\n\
\ ARRAY_AGG(TIMESTAMP({{$.inputs.parameters['pipelinechannel--time_column']}})\
\ ORDER BY {{$.inputs.parameters['pipelinechannel--time_column']}})\
\ AS {{$.inputs.parameters['pipelinechannel--time_column']}},\n\
\ ARRAY_AGG({{$.inputs.parameters['pipelinechannel--target_column']}}\
\ ORDER BY {{$.inputs.parameters['pipelinechannel--time_column']}})\
\ AS {{$.inputs.parameters['pipelinechannel--target_column']}},\n\
\ ARRAY_AGG(split__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}\
\ ORDER BY {{$.inputs.parameters['pipelinechannel--time_column']}})\
\ AS split__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}},\n\
\ ARRAY_AGG(window__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}\
\ ORDER BY {{$.inputs.parameters['pipelinechannel--time_column']}})\
\ AS window__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}},\n\
\ FROM base_data\n GROUP BY {{$.inputs.parameters['pipelinechannel--time_series_identifier_column']}}\n\
\ "
taskInfo:
name: aggregate-by-time-series-id
build-job-configuration-query:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query
dependentTasks:
- bigquery-create-dataset
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}'
pipelinechannel--bigquery-create-dataset-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset
pipelinechannel--bigquery-create-dataset-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}'
table_id:
runtimeValue:
constant: data
write_disposition:
runtimeValue:
constant: WRITE_EMPTY
taskInfo:
name: build-job-configuration-query
condition-2:
componentRef:
name: comp-condition-2
dependentTasks:
- prophet-trainer
inputs:
artifacts:
pipelinechannel--prophet-trainer-evaluated_examples_directory:
taskOutputArtifact:
outputArtifactKey: evaluated_examples_directory
producerTask: prophet-trainer
parameters:
pipelinechannel--dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
pipelinechannel--encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: pipelinechannel--evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: pipelinechannel--evaluation_dataflow_max_num_workers
pipelinechannel--location:
componentInputParameter: pipelinechannel--location
pipelinechannel--project:
componentInputParameter: pipelinechannel--project
pipelinechannel--run_evaluation:
componentInputParameter: pipelinechannel--run_evaluation
pipelinechannel--target_column:
componentInputParameter: pipelinechannel--target_column
taskInfo:
name: run-evaluation
triggerPolicy:
condition: inputs.parameter_values['pipelinechannel--run_evaluation']
== true
feature-transform-engine:
cachingOptions:
enableCache: true
componentRef:
name: comp-feature-transform-engine
dependentTasks:
- bigquery-create-dataset
- wrapped-in-list
inputs:
parameters:
autodetect_csv_schema:
runtimeValue:
constant: true
bigquery_staging_full_dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}.{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}'
data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
data_source_csv_filenames:
componentInputParameter: pipelinechannel--data_source_csv_filenames
forecasting_apply_windowing:
runtimeValue:
constant: false
forecasting_context_window:
runtimeValue:
constant: 0.0
forecasting_forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
forecasting_predefined_window_column:
componentInputParameter: pipelinechannel--window_column
forecasting_time_column:
componentInputParameter: pipelinechannel--time_column
forecasting_time_series_identifier_columns:
taskOutputParameter:
outputParameterKey: Output
producerTask: wrapped-in-list
forecasting_window_max_count:
componentInputParameter: pipelinechannel--window_max_count
forecasting_window_stride_length:
componentInputParameter: pipelinechannel--window_stride_length
location:
componentInputParameter: pipelinechannel--location
pipelinechannel--bigquery-create-dataset-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset
pipelinechannel--bigquery-create-dataset-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset
predefined_split_key:
componentInputParameter: pipelinechannel--predefined_split_key
prediction_type:
runtimeValue:
constant: time_series
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
target_column:
componentInputParameter: pipelinechannel--target_column
test_fraction:
componentInputParameter: pipelinechannel--test_fraction
tf_auto_transform_features:
runtimeValue:
constant: {}
timestamp_split_key:
componentInputParameter: pipelinechannel--timestamp_split_key
training_fraction:
componentInputParameter: pipelinechannel--training_fraction
validation_fraction:
componentInputParameter: pipelinechannel--validation_fraction
taskInfo:
name: feature-transform-engine
get-fte-suffix:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-fte-suffix
dependentTasks:
- bigquery-create-dataset
- feature-transform-engine
inputs:
parameters:
bigquery_staging_full_dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}.{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}'
fte_table:
runtimeValue:
constant: fte_time_series_output
location:
componentInputParameter: pipelinechannel--location
pipelinechannel--bigquery-create-dataset-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset
pipelinechannel--bigquery-create-dataset-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: get-fte-suffix
get-table-location:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-table-location
inputs:
parameters:
default_location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
table:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
taskInfo:
name: get-table-location
model-upload:
cachingOptions:
enableCache: true
componentRef:
name: comp-model-upload
dependentTasks:
- prophet-trainer
inputs:
artifacts:
unmanaged_container_model:
taskOutputArtifact:
outputArtifactKey: unmanaged_container_model
producerTask: prophet-trainer
parameters:
description:
runtimeValue:
constant: Prophet model.
display_name:
runtimeValue:
constant: prophet_{{$.pipeline_job_uuid}}
location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: model-upload
prophet-trainer:
cachingOptions:
enableCache: true
componentRef:
name: comp-prophet-trainer
dependentTasks:
- get-fte-suffix
- table-to-uri
inputs:
parameters:
data_granularity_unit:
componentInputParameter: pipelinechannel--data_granularity_unit
dataflow_disk_size_gb:
componentInputParameter: pipelinechannel--trainer_dataflow_disk_size_gb
dataflow_machine_type:
componentInputParameter: pipelinechannel--trainer_dataflow_machine_type
dataflow_max_num_workers:
componentInputParameter: pipelinechannel--trainer_dataflow_max_num_workers
dataflow_service_account:
componentInputParameter: pipelinechannel--dataflow_service_account
dataflow_subnetwork:
componentInputParameter: pipelinechannel--dataflow_subnetwork
dataflow_use_public_ips:
componentInputParameter: pipelinechannel--dataflow_use_public_ips
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
forecast_horizon:
componentInputParameter: pipelinechannel--forecast_horizon
location:
componentInputParameter: pipelinechannel--location
max_num_trials:
componentInputParameter: pipelinechannel--max_num_trials
optimization_objective:
componentInputParameter: pipelinechannel--optimization_objective
pipelinechannel--get-fte-suffix-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-fte-suffix
pipelinechannel--table-to-uri-uri:
taskOutputParameter:
outputParameterKey: uri
producerTask: table-to-uri
predefined_split_column:
runtimeValue:
constant: split__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}
project:
componentInputParameter: pipelinechannel--project
root_dir:
componentInputParameter: pipelinechannel--root_dir
source_bigquery_uri:
runtimeValue:
constant: bq://{{$.inputs.parameters['pipelinechannel--table-to-uri-uri']}}
target_column:
componentInputParameter: pipelinechannel--target_column
time_column:
componentInputParameter: pipelinechannel--time_column
time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
window_column:
runtimeValue:
constant: window__{{$.inputs.parameters['pipelinechannel--get-fte-suffix-Output']}}
taskInfo:
name: prophet-trainer
table-to-uri:
cachingOptions:
enableCache: true
componentRef:
name: comp-table-to-uri
dependentTasks:
- bigquery-query-job
inputs:
artifacts:
table:
taskOutputArtifact:
outputArtifactKey: destination_table
producerTask: bigquery-query-job
taskInfo:
name: table-to-uri
validate-inputs:
cachingOptions:
enableCache: true
componentRef:
name: comp-validate-inputs
inputs:
parameters:
data_granularity_unit:
componentInputParameter: pipelinechannel--data_granularity_unit
data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
data_source_csv_filenames:
componentInputParameter: pipelinechannel--data_source_csv_filenames
optimization_objective:
componentInputParameter: pipelinechannel--optimization_objective
predefined_split_key:
componentInputParameter: pipelinechannel--predefined_split_key
target_column:
componentInputParameter: pipelinechannel--target_column
test_fraction:
componentInputParameter: pipelinechannel--test_fraction
time_column:
componentInputParameter: pipelinechannel--time_column
time_series_identifier_column:
componentInputParameter: pipelinechannel--time_series_identifier_column
timestamp_split_key:
componentInputParameter: pipelinechannel--timestamp_split_key
training_fraction:
componentInputParameter: pipelinechannel--training_fraction
validation_fraction:
componentInputParameter: pipelinechannel--validation_fraction
window_column:
componentInputParameter: pipelinechannel--window_column
window_max_count:
componentInputParameter: pipelinechannel--window_max_count
window_stride_length:
componentInputParameter: pipelinechannel--window_stride_length
taskInfo:
name: validate-inputs
wrapped-in-list:
cachingOptions:
enableCache: true
componentRef:
name: comp-wrapped-in-list
inputs:
parameters:
value:
componentInputParameter: pipelinechannel--time_series_identifier_column
taskInfo:
name: wrapped-in-list
inputDefinitions:
parameters:
pipelinechannel--data_granularity_unit:
parameterType: STRING
pipelinechannel--data_source_bigquery_table_path:
parameterType: STRING
pipelinechannel--data_source_csv_filenames:
parameterType: STRING
pipelinechannel--dataflow_service_account:
parameterType: STRING
pipelinechannel--dataflow_subnetwork:
parameterType: STRING
pipelinechannel--dataflow_use_public_ips:
parameterType: BOOLEAN
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--evaluation_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--evaluation_dataflow_machine_type:
parameterType: STRING
pipelinechannel--evaluation_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--forecast_horizon:
parameterType: NUMBER_INTEGER
pipelinechannel--location:
parameterType: STRING
pipelinechannel--max_num_trials:
parameterType: NUMBER_INTEGER
pipelinechannel--optimization_objective:
parameterType: STRING
pipelinechannel--predefined_split_key:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
pipelinechannel--root_dir:
parameterType: STRING
pipelinechannel--run_evaluation:
parameterType: BOOLEAN
pipelinechannel--target_column:
parameterType: STRING
pipelinechannel--test_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--time_column:
parameterType: STRING
pipelinechannel--time_series_identifier_column:
parameterType: STRING
pipelinechannel--timestamp_split_key:
parameterType: STRING
pipelinechannel--trainer_dataflow_disk_size_gb:
parameterType: NUMBER_INTEGER
pipelinechannel--trainer_dataflow_machine_type:
parameterType: STRING
pipelinechannel--trainer_dataflow_max_num_workers:
parameterType: NUMBER_INTEGER
pipelinechannel--training_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--validation_fraction:
parameterType: NUMBER_DOUBLE
pipelinechannel--window_column:
parameterType: STRING
pipelinechannel--window_max_count:
parameterType: NUMBER_INTEGER
pipelinechannel--window_stride_length:
parameterType: NUMBER_INTEGER
comp-feature-transform-engine:
executorLabel: exec-feature-transform-engine
inputDefinitions:
parameters:
autodetect_csv_schema:
defaultValue: false
description: 'If True, infers the column types
when importing CSVs into BigQuery.'
isOptional: true
parameterType: BOOLEAN
bigquery_staging_full_dataset_id:
defaultValue: ''
description: Dataset in "projectId.datasetId" format for storing intermediate-FTE
BigQuery tables. If the specified dataset does not exist in BigQuery,
FTE will create the dataset. If no bigquery_staging_full_dataset_id is
specified, all intermediate tables will be stored in a dataset created
under the provided project in the input data source's location during
FTE execution called "vertex_feature_transform_engine_staging_{location.replace('-',
'_')}". All tables generated by FTE will have a 30 day TTL.
isOptional: true
parameterType: STRING
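          # Example form only (project and dataset names are placeholders):
          #   bigquery_staging_full_dataset_id: my-project.my_staging_dataset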
data_source_bigquery_table_path:
defaultValue: ''
description: BigQuery input data source to run feature transform on.
isOptional: true
parameterType: STRING
data_source_csv_filenames:
defaultValue: ''
description: CSV input data source to run feature transform on.
isOptional: true
parameterType: STRING
dataflow_disk_size_gb:
defaultValue: 40.0
description: The disk size, in gigabytes, to use on each Dataflow worker
instance. If not set, default to 40.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-16
description: The machine type used for dataflow jobs. If not set, default
to n1-standard-16.
isOptional: true
parameterType: STRING
dataflow_max_num_workers:
defaultValue: 25.0
description: The number of workers to run the dataflow job. If not set,
default to 25.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
description: Custom service account to run Dataflow jobs.
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
description: 'Dataflow''s fully qualified subnetwork name, when empty the
default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications'
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: Specifies whether Dataflow workers use public IP addresses.
isOptional: true
parameterType: BOOLEAN
dataset_level_custom_transformation_definitions:
defaultValue: []
description: 'List of dataset-level custom transformation definitions. Custom,
bring-your-own dataset-level transform functions, where users can define
and import their own transform function and use it with FTE''s built-in
transformations. Using custom transformations is an experimental feature
and it is currently not supported during batch prediction.
[ { "transformation": "ConcatCols", "module_path": "/path/to/custom_transform_fn_dlt.py",
"function_name": "concat_cols" } ] Using custom transform function together
with FTE''s built-in transformations: .. code-block:: python [ { "transformation":
"Join", "right_table_uri": "bq://test-project.dataset_test.table", "join_keys":
[["join_key_col", "join_key_col"]] },{ "transformation": "ConcatCols",
"cols": ["feature_1", "feature_2"], "output_col": "feature_1_2" } ]'
isOptional: true
parameterType: LIST
dataset_level_transformations:
defaultValue: []
description: "List of dataset-level transformations.\n[ { \"transformation\"\
: \"Join\", \"right_table_uri\": \"bq://test-project.dataset_test.table\"\
, \"join_keys\": [[\"join_key_col\", \"join_key_col\"]] }, ... ] Additional\
\ information about FTE's currently supported built-in\n transformations:\n\
\ Join: Joins features from right_table_uri. For each join key, the\
\ left table keys will be included and the right table keys will be dropped.\n\
\ Example: .. code-block:: python { \"transformation\": \"Join\"\
, \"right_table_uri\": \"bq://test-project.dataset_test.table\", \"join_keys\"\
: [[\"join_key_col\", \"join_key_col\"]] }\n Arguments:\n \
\ right_table_uri: Right table BigQuery uri to join with input_full_table_id.\n\
\ join_keys: Features to join on. For each nested list, the\
\ first element is a left table column and the second is its corresponding\
\ right table column.\n TimeAggregate: Creates a new feature composed\
\ of values of an existing feature from a fixed time period ago or in\
\ the future.\n Ex: A feature for sales by store 1 year ago.\n \
\ Example: .. code-block:: python { \"transformation\": \"TimeAggregate\"\
, \"time_difference\": 40, \"time_difference_units\": \"DAY\", \"time_series_identifier_columns\"\
: [\"store_id\"], \"time_column\": \"time_col\", \"time_difference_target_column\"\
: \"target_col\", \"output_column\": \"output_col\" }\n Arguments:\n\
\ time_difference: Number of time_difference_units to look\
\ back or into the future on our time_difference_target_column.\n \
\ time_difference_units: Units of time_difference to look back\
\ or into the future on our time_difference_target_column. Must be one\
\ of * 'DAY' * 'WEEK' (Equivalent to 7 DAYs) * 'MONTH' * 'QUARTER' * 'YEAR'\n\
\ time_series_identifier_columns: Names of the time series\
\ identifier columns.\n time_column: Name of the time column.\n\
\ time_difference_target_column: Column we wish to get the\
\ value of time_difference time_difference_units in the past or future.\n\
\ output_column: Name of our new time aggregate feature.\n\
\ is_future: Whether we wish to look forward in time. Defaults\
\ to False. PartitionByMax/PartitionByMin/PartitionByAvg/PartitionBySum:\
\ Performs a partition by reduce operation (one of max, min, avg, or sum)\
\ with a fixed historic time period. Ex: Getting avg sales (the reduce\
\ column) for each store (partition_by_column) over the previous 5 days\
\ (time_column, time_ago_units, and time_ago).\n Example: .. code-block::\
\ python { \"transformation\": \"PartitionByMax\", \"reduce_column\"\
: \"sell_price\", \"partition_by_columns\": [\"store_id\", \"state_id\"\
], \"time_column\": \"date\", \"time_ago\": 1, \"time_ago_units\": \"\
WEEK\", \"output_column\": \"partition_by_reduce_max_output\" }\n \
\ Arguments:\n reduce_column: Column to apply the reduce\
\ operation on. Reduce operations include the\n following:\
\ Max, Min, Avg, Sum.\n partition_by_columns: List of columns\
\ to partition by.\n time_column: Time column for the partition\
\ by operation's window function.\n time_ago: Number of time_ago_units\
\ to look back on our target_column, starting from time_column (inclusive).\n\
\ time_ago_units: Units of time_ago to look back on our target_column.\
\ Must be one of * 'DAY' * 'WEEK'\n output_column: Name of\
\ our output feature."
isOptional: true
parameterType: LIST
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
feature_selection_algorithm:
defaultValue: AMI
description: "The algorithm of feature selection. One of \"AMI\", \"CMIM\"\
, \"JMIM\", \"MRMR\", default to be \"AMI\". The algorithms available\
\ are: AMI(Adjusted Mutual Information):\nReference: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.adjusted_mutual_info_score.html\
\ Arrays are not yet supported in this algorithm. CMIM(Conditional Mutual\
\ Information Maximization): Reference paper: Mohamed Bennasar, Yulia\
\ Hicks, Rossitza Setchi, \u201CFeature selection using Joint Mutual Information\
\ Maximisation,\u201D Expert Systems with Applications, vol. 42, issue\
\ 22, 1 December 2015, Pages 8520-8532. JMIM(Joint Mutual Information\
\ Maximization\nReference:\n paper: Mohamed Bennasar, Yulia Hicks, Rossitza\
\ Setchi, \u201CFeature selection using Joint Mutual Information Maximisation,\u201D\
\ Expert Systems with Applications, vol. 42, issue 22, 1 December 2015,\
\ Pages 8520-8532. MRMR(MIQ Minimum-redundancy Maximum-relevance): Reference\
\ paper: Hanchuan Peng, Fuhui Long, and Chris Ding. \"Feature selection\
\ based on mutual information criteria of max-dependency, max-relevance,\
\ and min-redundancy.\" IEEE Transactions on pattern analysis and machine\
\ intelligence 27, no.\n 8: 1226-1238."
isOptional: true
parameterType: STRING
feature_selection_execution_engine:
defaultValue: dataflow
description: Execution engine to run feature selection, value can be dataflow,
bigquery.
isOptional: true
parameterType: STRING
forecasting_apply_windowing:
defaultValue: true
description: Whether to apply window strategy.
isOptional: true
parameterType: BOOLEAN
forecasting_available_at_forecast_columns:
defaultValue: []
description: Forecasting available at forecast columns.
isOptional: true
parameterType: LIST
forecasting_context_window:
defaultValue: -1.0
description: Forecasting context window.
isOptional: true
parameterType: NUMBER_INTEGER
forecasting_forecast_horizon:
defaultValue: -1.0
description: Forecasting horizon.
isOptional: true
parameterType: NUMBER_INTEGER
forecasting_holiday_regions:
defaultValue: []
description: 'The geographical region based on which the holiday effect
is applied in modeling by adding holiday categorical array feature that
include all holidays matching the date. This option only allowed when
data granularity is day. By default, holiday effect modeling is disabled.
To turn it on, specify the holiday region using this option.
Top level: * ''GLOBAL''
Second level: continental regions: * ''NA'': North America
* ''JAPAC'': Japan and Asia Pacific
* ''EMEA'': Europe, the Middle East and Africa
* ''LAC'': Latin America and the Caribbean
Third level: countries from ISO 3166-1 Country codes.
Valid regions: * ''GLOBAL'' * ''NA'' * ''JAPAC'' * ''EMEA'' * ''LAC''
* ''AE''
* ''AR'' * ''AT'' * ''AU'' * ''BE'' * ''BR'' * ''CA'' * ''CH'' * ''CL''
* ''CN'' * ''CO''
* ''CZ'' * ''DE'' * ''DK'' * ''DZ'' * ''EC'' * ''EE'' * ''EG'' * ''ES''
* ''FI'' * ''FR''
* ''GB'' * ''GR'' * ''HK'' * ''HU'' * ''ID'' * ''IE'' * ''IL'' * ''IN''
* ''IR'' * ''IT''
* ''JP'' * ''KR'' * ''LV'' * ''MA'' * ''MX'' * ''MY'' * ''NG'' * ''NL''
* ''NO'' * ''NZ''
* ''PE'' * ''PH'' * ''PK'' * ''PL'' * ''PT'' * ''RO'' * ''RS'' * ''RU''
* ''SA'' * ''SE''
* ''SG'' * ''SI'' * ''SK'' * ''TH'' * ''TR'' * ''TW'' * ''UA'' * ''US''
* ''VE'' * ''VN''
* ''ZA'''
isOptional: true
parameterType: LIST
forecasting_predefined_window_column:
defaultValue: ''
description: Forecasting predefined window column.
isOptional: true
parameterType: STRING
forecasting_time_column:
defaultValue: ''
description: Forecasting time column.
isOptional: true
parameterType: STRING
forecasting_time_series_attribute_columns:
defaultValue: []
description: Forecasting time series attribute columns.
isOptional: true
parameterType: LIST
forecasting_time_series_identifier_column:
description: '[Deprecated] A forecasting time series identifier column.
Raises an exception if used - use the "time_series_identifier_column"
field instead.'
isOptional: true
parameterType: STRING
forecasting_time_series_identifier_columns:
defaultValue: []
description: The list of forecasting time series identifier columns.
isOptional: true
parameterType: LIST
forecasting_unavailable_at_forecast_columns:
defaultValue: []
description: Forecasting unavailable at forecast columns.
isOptional: true
parameterType: LIST
forecasting_window_max_count:
defaultValue: -1.0
description: Forecasting window max count.
isOptional: true
parameterType: NUMBER_INTEGER
forecasting_window_stride_length:
defaultValue: -1.0
description: Forecasting window stride length.
isOptional: true
parameterType: NUMBER_INTEGER
group_columns:
isOptional: true
parameterType: LIST
group_temporal_total_weight:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_DOUBLE
group_total_weight:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_DOUBLE
legacy_transformations_path:
defaultValue: ''
isOptional: true
parameterType: STRING
location:
description: Location for the created GCP services.
parameterType: STRING
materialized_examples_format:
defaultValue: tfrecords_gzip
description: The format to use for the materialized examples. Should be
either 'tfrecords_gzip' (default) or 'parquet'.
isOptional: true
parameterType: STRING
max_selected_features:
defaultValue: 1000.0
description: Maximum number of features to select. If specified, the transform
config will be purged by only using the selected features that ranked
top in the feature ranking, which has the ranking value for all supported
features. If the number of input features is smaller than max_selected_features
specified, we will still run the feature selection process and generate
            the feature ranking, but no features will be excluded. The value will be
set to 1000 by default if run_feature_selection is enabled.
isOptional: true
parameterType: NUMBER_INTEGER
model_type:
description: 'Model type, which we wish to engineer features for. Can be
one of: neural_network, boosted_trees, l2l, seq2seq, tft, or tide. Defaults
to the empty value, `None`.'
isOptional: true
parameterType: STRING
multimodal_image_columns:
defaultValue: []
description: List of multimodal image columns. Defaults to an empty list.
isOptional: true
parameterType: LIST
multimodal_tabular_columns:
defaultValue: []
          description: List of multimodal tabular columns. Defaults to an empty list.
isOptional: true
parameterType: LIST
multimodal_text_columns:
defaultValue: []
          description: List of multimodal text columns. Defaults to an empty list.
isOptional: true
parameterType: LIST
multimodal_timeseries_columns:
defaultValue: []
description: List of multimodal timeseries columns. Defaults to an empty
            list.
isOptional: true
parameterType: LIST
predefined_split_key:
defaultValue: ''
description: Predefined split key.
isOptional: true
parameterType: STRING
prediction_type:
defaultValue: ''
description: Model prediction type. One of "classification", "regression",
"time_series".
isOptional: true
parameterType: STRING
project:
description: Project to run feature transform engine.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
run_distill:
defaultValue: false
description: (deprecated) Whether the distillation should be applied to
the training.
isOptional: true
parameterType: BOOLEAN
run_feature_selection:
defaultValue: false
description: Whether the feature selection should be applied to the dataset.
isOptional: true
parameterType: BOOLEAN
stats_gen_execution_engine:
defaultValue: dataflow
description: 'Execution engine to perform statistics generation. Can be
one of: "dataflow" (by default) or "bigquery". Using "bigquery" as the
execution engine is experimental.'
isOptional: true
parameterType: STRING
stratified_split_key:
defaultValue: ''
description: Stratified split key.
isOptional: true
parameterType: STRING
target_column:
defaultValue: ''
description: Target column of input data.
isOptional: true
parameterType: STRING
temporal_total_weight:
defaultValue: 0.0
isOptional: true
parameterType: NUMBER_DOUBLE
test_fraction:
defaultValue: -1.0
description: Fraction of input data for testing.
isOptional: true
parameterType: NUMBER_DOUBLE
tf_auto_transform_features:
defaultValue: {}
description: 'Dict mapping auto and/or type-resolutions to TF transform
features. FTE will automatically configure a set of built-in transformations
for each feature based on its data statistics. If users do not want auto
type resolution, but want the set of transformations for a given type
to be automatically generated, they may specify pre-resolved transformations
types. The following type hint dict keys are supported: * ''auto'' * ''categorical''
* ''numeric'' * ''text'' * ''timestamp'' Example: `{ "auto": ["feature1"],
"categorical": ["feature2", "feature3"], }`. Note that the target and
weight column may not be included as an auto transformation unless users
are running forecasting.'
isOptional: true
parameterType: STRUCT
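          # The JSON example from the description above, rendered as a YAML parameter
          # value (feature names are placeholders):
          #   tf_auto_transform_features:
          #     auto: ["feature1"]
          #     categorical: ["feature2", "feature3"]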
tf_custom_transformation_definitions:
defaultValue: []
description: 'List of TensorFlow-based custom transformation definitions. Custom,
bring-your-own transform functions, where users can define and import
their own transform function and use it with FTE''s built-in transformations.
`[ { "transformation": "PlusOne", "module_path": "gs://bucket/custom_transform_fn.py",
"function_name": "plus_one_transform" }, { "transformation": "MultiplyTwo",
"module_path": "gs://bucket/custom_transform_fn.py", "function_name":
"multiply_two_transform" } ] Using custom transform function together
with FTE''s built-in transformations: .. code-block:: python [ { "transformation":
"CastToFloat", "input_columns": ["feature_1"], "output_columns": ["feature_1"]
},{ "transformation": "PlusOne", "input_columns": ["feature_1"] "output_columns":
["feature_1_plused_one"] },{ "transformation": "MultiplyTwo", "input_columns":
["feature_1"] "output_columns": ["feature_1_multiplied_two"] } ]'
isOptional: true
parameterType: LIST
tf_transform_execution_engine:
defaultValue: dataflow
description: 'Execution engine to perform row-level TF transformations.
Can be one of: "dataflow" (by default) or "bigquery". Using "bigquery"
as the execution engine is experimental and is for allowlisted customers
only. In addition, executing on "bigquery" only supports auto transformations
(i.e., specified by tf_auto_transform_features) and will raise an error
when tf_custom_transformation_definitions or tf_transformations_path is
set.'
isOptional: true
parameterType: STRING
tf_transformations_path:
defaultValue: ''
description: "Path to TensorFlow-based transformation configuration. Path\
\ to a JSON file used to specified FTE's TF transformation configurations.\
\ In the following, we provide some sample transform configurations to\
\ demonstrate FTE's capabilities. All transformations on input columns\
\ are explicitly specified with FTE's built-in transformations. Chaining\
\ of multiple transformations on a single column is also supported. For\
\ example: .. code-block:: python [ { \"transformation\": \"ZScale\"\
, \"input_columns\": [\"feature_1\"] }, { \"transformation\": \"ZScale\"\
, \"input_columns\": [\"feature_2\"] } ]`. Additional information about\
\ FTE's currently supported built-in\ntransformations:\nDatetime: Extracts\
            \ datetime features from a column containing timestamp strings.\n    Example:\
\ .. code-block:: python { \"transformation\": \"Datetime\", \"input_columns\"\
: [\"feature_1\"], \"time_format\": \"%Y-%m-%d\" }\n Arguments:\n \
\ input_columns: A list with a single column to perform the datetime\
\ transformation on.\n output_columns: Names of output columns,\
\ one for each datetime_features element.\n time_format: Datetime\
\ format string. Time format is a combination of Date + Time Delimiter\
\ (optional) + Time (optional) directives. Valid date directives are as\
\ follows * '%Y-%m-%d' # 2018-11-30 * '%Y/%m/%d' # 2018/11/30 * '%y-%m-%d'\
\ # 18-11-30 * '%y/%m/%d' # 18/11/30 * '%m-%d-%Y' # 11-30-2018 * '%m/%d/%Y'\
\ # 11/30/2018 * '%m-%d-%y' # 11-30-18 * '%m/%d/%y' # 11/30/18 * '%d-%m-%Y'\
\ # 30-11-2018 * '%d/%m/%Y' # 30/11/2018 * '%d-%B-%Y' # 30-November-2018\
\ * '%d-%m-%y' # 30-11-18 * '%d/%m/%y' # 30/11/18 * '%d-%B-%y' # 30-November-18\
\ * '%d%m%Y' # 30112018 * '%m%d%Y' # 11302018 * '%Y%m%d' # 20181130\
\ Valid time delimiters are as follows * 'T' * ' ' Valid time directives\
\ are as follows * '%H:%M' # 23:59 * '%H:%M:%S' #\n \
\ 23:59:58 * '%H:%M:%S.%f' # 23:59:58[.123456] * '%H:%M:%S.%f%z'\
\ # 23:59:58[.123456]+0000 * '%H:%M:%S%z', # 23:59:58+0000\n \
            \ datetime_features: List of datetime features to be extracted. Each entry\
\ must be one of * 'YEAR' * 'MONTH' * 'DAY' * 'DAY_OF_WEEK' * 'DAY_OF_YEAR'\
\ * 'WEEK_OF_YEAR' * 'QUARTER' * 'HOUR' * 'MINUTE' * 'SECOND' Defaults\
\ to ['YEAR', 'MONTH', 'DAY', 'DAY_OF_WEEK', 'DAY_OF_YEAR', 'WEEK_OF_YEAR']\n\
Log: Performs the natural log on a numeric column.\n Example: .. code-block::\
\ python { \"transformation\": \"Log\", \"input_columns\": [\"feature_1\"\
] }\n Arguments:\n input_columns: A list with a single column\
\ to perform the log transformation on.\n output_columns: A list\
\ with a single output column name, corresponding to the output of our\
\ transformation.\nZScale: Performs Z-scale normalization on a numeric\
\ column.\n Example: .. code-block:: python { \"transformation\"\
: \"ZScale\", \"input_columns\": [\"feature_1\"] }\n Arguments:\n \
\ input_columns: A list with a single column to perform the z-scale\
\ transformation on.\n output_columns: A list with a single output\
\ column name, corresponding to the output of our transformation.\nVocabulary:\
\ Converts strings to integers, where each unique string gets a unique\
\ integer representation.\n Example: .. code-block:: python { \"\
transformation\": \"Vocabulary\", \"input_columns\": [\"feature_1\"] }\n\
\ Arguments:\n input_columns: A list with a single column to\
\ perform the vocabulary transformation on.\n output_columns: A\
\ list with a single output column name, corresponding to the output of\
\ our transformation.\n top_k: Number of the most frequent words\
\ in the vocabulary to use for generating dictionary lookup indices. If\
\ not specified, all words in the vocabulary will be used. Defaults to\
\ None.\n frequency_threshold: Limit the vocabulary only to words\
\ whose number of occurrences in the input exceeds frequency_threshold.\
\ If not specified, all words in the vocabulary will be included. If both\
\ top_k and frequency_threshold are specified, a word must satisfy both\
\ conditions to be included. Defaults to None.\nCategorical: Transforms\
\ categorical columns to integer columns.\n Example: .. code-block::\
\ python { \"transformation\": \"Categorical\", \"input_columns\": [\"\
feature_1\"], \"top_k\": 10 }\n Arguments:\n input_columns:\
\ A list with a single column to perform the categorical transformation\
\ on.\n output_columns: A list with a single output column name,\
\ corresponding to the output of our transformation.\n top_k: Number\
\ of the most frequent words in the vocabulary to use for generating dictionary\
\ lookup indices. If not specified, all words in the vocabulary will be\
\ used.\n frequency_threshold: Limit the vocabulary only to words\
\ whose number of occurrences in the input exceeds frequency_threshold.\
\ If not specified, all words in the vocabulary will be included. If both\
\ top_k and frequency_threshold are specified, a word must satisfy both\
\ conditions to be included.\nReduce: Given a column where each entry\
\ is a numeric array, reduces arrays according to our reduce_mode.\n \
\ Example: .. code-block:: python { \"transformation\": \"Reduce\"\
, \"input_columns\": [\"feature_1\"], \"reduce_mode\": \"MEAN\", \"output_columns\"\
: [\"feature_1_mean\"] }\n Arguments:\n input_columns: A list\
\ with a single column to perform the reduce transformation on.\n \
\ output_columns: A list with a single output column name, corresponding\
\ to the output of our transformation.\n reduce_mode: One of *\
\ 'MAX' * 'MIN' * 'MEAN' * 'LAST_K' Defaults to 'MEAN'.\n last_k:\
\ The number of last k elements when 'LAST_K' reduce mode is used. Defaults\
\ to 1.\nSplitString: Given a column of strings, splits strings into token\
\ arrays.\n Example: .. code-block:: python { \"transformation\"\
: \"SplitString\", \"input_columns\": [\"feature_1\"], \"separator\":\
\ \"$\" }\n Arguments:\n input_columns: A list with a single\
\ column to perform the split string transformation on.\n output_columns:\
\ A list with a single output column name, corresponding to the output\
\ of our transformation.\n separator: Separator to split input\
\ string into tokens. Defaults to ' '.\n missing_token: Missing\
\ token to use when no string is included. Defaults to ' _MISSING_ '.\n\
NGram: Given a column of strings, splits strings into token arrays where\
\ each token is an integer.\n Example: .. code-block:: python { \"\
transformation\": \"NGram\", \"input_columns\": [\"feature_1\"], \"min_ngram_size\"\
: 1, \"max_ngram_size\": 2, \"separator\": \" \" }\n Arguments:\n \
\ input_columns: A list with a single column to perform the n-gram\
\ transformation on.\n output_columns: A list with a single output\
\ column name, corresponding to the output of our transformation.\n \
\ min_ngram_size: Minimum n-gram size. Must be a positive number\
\ and <= max_ngram_size. Defaults to 1.\n max_ngram_size: Maximum\
\ n-gram size. Must be a positive number and >= min_ngram_size. Defaults\
\ to 2.\n top_k: Number of the most frequent words in the vocabulary\
\ to use for generating dictionary lookup indices. If not specified, all\
\ words in the vocabulary will be used. Defaults to None.\n frequency_threshold:\
\ Limit the dictionary's vocabulary only to words whose number of occurrences\
\ in the input exceeds frequency_threshold. If not specified, all words\
\ in the vocabulary will be included. If both top_k and frequency_threshold\
\ are specified, a word must satisfy both conditions to be included. Defaults\
\ to None.\n separator: Separator to split input string into tokens.\
\ Defaults to ' '.\n missing_token: Missing token to use when no\
\ string is included. Defaults to ' _MISSING_ '.\nClip: Given a numeric\
\ column, clips elements such that elements < min_value are assigned min_value,\
\ and elements > max_value are assigned max_value.\n Example: .. code-block::\
\ python { \"transformation\": \"Clip\", \"input_columns\": [\"col1\"\
], \"output_columns\": [\"col1_clipped\"], \"min_value\": 1., \"max_value\"\
: 10., }\n Arguments:\n input_columns: A list with a single\
\ column to perform the n-gram transformation on.\n output_columns:\
\ A list with a single output column name, corresponding to the output\
\ of our transformation.\n min_value: Number where all values below\
\ min_value are set to min_value. If no min_value is provided, min clipping\
\ will not occur. Defaults to None.\n max_value: Number where all\
\ values above max_value are set to max_value If no max_value is provided,\
\ max clipping will not occur. Defaults to None.\nMultiHotEncoding: Performs\
\ multi-hot encoding on a categorical array column.\n Example: ..\
\ code-block:: python { \"transformation\": \"MultiHotEncoding\", \"\
input_columns\": [\"col1\"], } The number of classes is determened by\
\ the largest number included in the input if it is numeric or the total\
\ number of unique values of the input if it is type str. If the input\
            \ has type str and an element contains separator tokens, the input\
            \ will be split at separator indices, and each element of the split\
            \ list will be considered a separate class. For example,\n    Input: \
\ .. code-block:: python [ [\"foo bar\"], # Example 0 [\"foo\",\
\ \"bar\"], # Example 1 [\"foo\"], # Example 2 [\"bar\"], \
\ # Example 3 ] Output (with default separator=\" \"): .. code-block::\
\ python [ [1, 1], # Example 0 [1, 1], # Example 1 [1,\
\ 0], # Example 2 [0, 1], # Example 3 ]\n Arguments:\n\
\ input_columns: A list with a single column to perform the multi-hot-encoding\
\ on.\n output_columns: A list with a single output column name,\
\ corresponding to the output of our transformation.\n top_k: Number\
\ of the most frequent words in the vocabulary to use for generating dictionary\
\ lookup indices. If not specified, all words in the vocabulary will be\
\ used. Defaults to None.\n frequency_threshold: Limit the dictionary's\
\ vocabulary only to words whose number of occurrences in the input exceeds\
\ frequency_threshold. If not specified, all words in the vocabulary will\
\ be included. If both top_k and frequency_threshold are specified, a\
\ word must satisfy both conditions to be included. Defaults to None.\n\
\ separator: Separator to split input string into tokens. Defaults\
\ to ' '.\nMaxAbsScale: Performs maximum absolute scaling on a numeric\
\ column.\n Example: .. code-block:: python { \"transformation\"\
: \"MaxAbsScale\", \"input_columns\": [\"col1\"], \"output_columns\":\
\ [\"col1_max_abs_scaled\"] }\n Arguments:\n input_columns:\
\ A list with a single column to perform max-abs-scale on.\n output_columns:\
\ A list with a single output column name, corresponding to the output\
\ of our transformation.\nCustom: Transformations defined in tf_custom_transformation_definitions\
\ are included here in the TensorFlow-based transformation configuration.\
\ For example, given the following tf_custom_transformation_definitions:\
\ .. code-block:: python [ { \"transformation\": \"PlusX\", \"module_path\"\
: \"gs://bucket/custom_transform_fn.py\", \"function_name\": \"plus_one_transform\"\
\ } ] We can include the following transformation: .. code-block:: python\
\ { \"transformation\": \"PlusX\", \"input_columns\": [\"col1\"], \"\
output_columns\": [\"col1_max_abs_scaled\"] \"x\": 5 } Note that input_columns\
\ must still be included in our arguments and output_columns is optional.\
\ All other arguments are those defined in custom_transform_fn.py, which\
\ includes `\"x\"` in this case. See tf_custom_transformation_definitions\
\ above. legacy_transformations_path (Optional[str]) Deprecated. Prefer\
\ tf_auto_transform_features. Path to a GCS file containing JSON string\
\ for legacy style transformations. Note that legacy_transformations_path\
\ and tf_auto_transform_features cannot both be specified."
isOptional: true
parameterType: STRING
timestamp_split_key:
defaultValue: ''
description: Timestamp split key.
isOptional: true
parameterType: STRING
training_fraction:
defaultValue: -1.0
description: Fraction of input data for training.
isOptional: true
parameterType: NUMBER_DOUBLE
validation_fraction:
defaultValue: -1.0
description: Fraction of input data for validation.
isOptional: true
parameterType: NUMBER_DOUBLE
weight_column:
defaultValue: ''
description: Weight column of input data.
isOptional: true
parameterType: STRING
outputDefinitions:
artifacts:
dataset_stats:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The stats of the dataset.
feature_ranking:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The ranking of features, all features supported in the dataset
will be included. For "AMI" algorithm, array features won't be available
in the ranking as arrays are not supported yet.
instance_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
materialized_data:
artifactType:
schemaTitle: system.Dataset
schemaVersion: 0.0.1
description: The materialized dataset.
training_schema:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
transform_output:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: The transform output artifact.
parameters:
bigquery_downsampled_test_split_uri:
description: BigQuery URI for the downsampled test split to pass to the
batch prediction component during batch explain.
parameterType: STRING
bigquery_test_split_uri:
description: BigQuery URI for the test split to pass to the batch prediction
component during evaluation.
parameterType: STRING
bigquery_train_split_uri:
description: BigQuery URI for the train split to pass to the batch prediction
component during distillation.
parameterType: STRING
bigquery_validation_split_uri:
description: BigQuery URI for the validation split to pass to the batch
prediction component during distillation.
parameterType: STRING
gcp_resources:
description: GCP resources created by this component. For more details,
see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
parameterType: STRING
split_example_counts:
description: JSON string of data split example counts for train, validate,
and test splits.
parameterType: STRING
comp-get-fte-suffix:
executorLabel: exec-get-fte-suffix
inputDefinitions:
parameters:
bigquery_staging_full_dataset_id:
parameterType: STRING
fte_table:
parameterType: STRING
location:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-get-table-location:
executorLabel: exec-get-table-location
inputDefinitions:
parameters:
default_location:
defaultValue: ''
description: Location to return if no table was given.
isOptional: true
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
table:
description: The BigQuery table to get a location for.
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-model-evaluation-regression:
executorLabel: exec-model-evaluation-regression
inputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: 'The Vertex model used for evaluation. Must be located in the
same
region as the location argument. It is used to set the default
configurations for AutoML and custom-trained models.'
isOptional: true
predictions_bigquery_source:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'BigQuery table
with prediction or explanation data to be used for this evaluation. For
prediction results, the table column should be named "predicted_*".'
isOptional: true
predictions_gcs_source:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
description: 'An artifact with its
URI pointing toward a GCS directory with prediction or explanation files
to be used for this evaluation. For prediction results, the files should
be named "prediction.results-*". For explanation results, the files
should be named "explanation.results-*".'
isOptional: true
parameters:
dataflow_disk_size_gb:
defaultValue: 50.0
description: 'The disk size (in GB) of the machine
executing the evaluation run.'
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-4
description: 'The machine type executing the
evaluation run.'
isOptional: true
parameterType: STRING
dataflow_max_workers_num:
defaultValue: 5.0
description: 'The max number of workers
executing the evaluation run.'
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
description: 'Service account to run the
Dataflow job. If not set, Dataflow will use the default worker service
account. For more details, see
            https://cloud.google.com/dataflow/docs/concepts/security-and-permissions#default_worker_service_account'
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
description: 'Dataflow''s fully qualified subnetwork
name, when empty the default subnetwork will be used. More
details:
https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications'
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: 'Specifies whether Dataflow
workers use public IP addresses.'
isOptional: true
parameterType: BOOLEAN
dataflow_workers_num:
defaultValue: 1.0
description: 'The number of workers executing the
evaluation run.'
isOptional: true
parameterType: NUMBER_INTEGER
encryption_spec_key_name:
defaultValue: ''
description: ' Customer-managed encryption key options.
If set, resources created by this pipeline will be encrypted with the
provided encryption key. Has the form:
`projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`.
The key needs to be in the same region as where the compute resource is
created.'
isOptional: true
parameterType: STRING
force_runner_mode:
defaultValue: ''
description: 'Flag to choose Beam runner. Valid options are
`DirectRunner` and `Dataflow`.'
isOptional: true
parameterType: STRING
ground_truth_bigquery_source:
defaultValue: ''
description: 'Required for custom tabular.
The BigQuery table URI representing where the ground truth is located.
Used to provide ground truth for each prediction instance when they are
not part of the batch prediction jobs prediction instance.'
isOptional: true
parameterType: STRING
ground_truth_format:
defaultValue: jsonl
description: 'Required for custom tabular and non
tabular data. The file format for the ground truth files. `jsonl`,
`csv`, and `bigquery` are the allowed formats.'
isOptional: true
parameterType: STRING
ground_truth_gcs_source:
defaultValue: []
description: 'Required for custom
tabular and non tabular data. The GCS URIs representing where the ground
truth is located. Used to provide ground truth for each prediction
instance when they are not part of the batch prediction jobs prediction
instance.'
isOptional: true
parameterType: LIST
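          # Hypothetical example value (bucket and paths are placeholders):
          #   ground_truth_gcs_source:
          #     - gs://my-bucket/ground_truth/ground_truth-*.jsonl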
location:
defaultValue: us-central1
description: Location for running the evaluation.
isOptional: true
parameterType: STRING
prediction_score_column:
defaultValue: prediction.value
description: 'The column name of the field
containing batch prediction scores. Formatted to be able to find nested
columns, delimited by `.`.'
isOptional: true
parameterType: STRING
predictions_format:
defaultValue: jsonl
description: 'The file format for the batch
prediction results. `jsonl`, `csv`, and `bigquery` are the allowed
formats, from Vertex Batch Prediction.'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run evaluation container. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
target_field_name:
description: 'The target field''s name. Formatted to be able to find
nested columns, delimited by `.`. Prefixed with ''instance.'' on the
component for Vertex Batch Prediction.'
parameterType: STRING
outputDefinitions:
artifacts:
evaluation_metrics:
artifactType:
schemaTitle: google.RegressionMetrics
schemaVersion: 0.0.1
description: '`google.RegressionMetrics` representing the regression
evaluation metrics in GCS.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the Dataflow
job. For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-model-upload:
executorLabel: exec-model-upload
inputDefinitions:
artifacts:
parent_model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: An artifact of a model which to upload a new version to. Only
specify this field when uploading a new version. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/upload#request-body)
isOptional: true
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: "The unmanaged container model to be uploaded. The Model can\
\ be passed from an upstream step or imported via a KFP `dsl.importer`.\n\
:Examples:\n ::\n\n from kfp import dsl\n from google_cloud_pipeline_components.google_cloud_pipeline_components.types\
\ import artifact_types\n\n importer_spec = dsl.importer(\n artifact_uri='gs://managed-pipeline-gcpc-e2e-test/automl-tabular/model',\n\
\ artifact_class=artifact_types.UnmanagedContainerModel,\n metadata={\n\
\ 'containerSpec': { 'imageUri':\n 'us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:prod'\n\
\ }\n })"
isOptional: true
parameters:
description:
defaultValue: ''
description: The description of the Model. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models#Model)
isOptional: true
parameterType: STRING
display_name:
description: 'The display name of the Model. The name
            can be up to 128 characters long and can consist of any UTF-8
characters. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models#Model)'
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: 'Customer-managed encryption
key spec for a Model. If set, this Model and all sub-resources of this
Model will be secured by this key. Has the form:
`projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`.
The key needs to be in the same region as where the compute resource
is created.'
isOptional: true
parameterType: STRING
explanation_metadata:
defaultValue: {}
description: 'Metadata describing the Model''s
input and output for explanation. Both `explanation_metadata` and `explanation_parameters`
must be passed together when used. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata)'
isOptional: true
parameterType: STRUCT
explanation_parameters:
defaultValue: {}
description: 'Parameters to configure
explaining for Model''s predictions. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters)'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels with user-defined metadata to
organize your model. Label keys and values can be no longer than 64
characters (Unicode codepoints), can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. See https://goo.gl/xmQnxf for more information and
examples of labels.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Optional location to upload this Model to. If
not set, defaults to `us-central1`.'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to upload this Model to. Defaults to the project in
which the PipelineJob is run.
isOptional: true
parameterType: STRING
outputDefinitions:
artifacts:
model:
artifactType:
schemaTitle: google.VertexModel
schemaVersion: 0.0.1
description: Artifact tracking the created Model.
parameters:
gcp_resources:
description: Serialized JSON of `gcp_resources` [proto](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud/google_cloud_pipeline_components/proto)
which tracks the upload Model's long-running operation.
parameterType: STRING
comp-prophet-trainer:
executorLabel: exec-prophet-trainer
inputDefinitions:
parameters:
data_granularity_unit:
description: String representing the units of time for the time column.
parameterType: STRING
dataflow_disk_size_gb:
defaultValue: 40.0
description: Dataflow worker's disk size in GB during training.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_machine_type:
defaultValue: n1-standard-1
description: The dataflow machine type used for training.
isOptional: true
parameterType: STRING
dataflow_max_num_workers:
defaultValue: 10.0
description: The max number of Dataflow workers used for training.
isOptional: true
parameterType: NUMBER_INTEGER
dataflow_service_account:
defaultValue: ''
description: Custom service account to run dataflow jobs.
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
description: Dataflow's fully qualified subnetwork name; when empty, the
        default subnetwork will be used.
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: Specifies whether Dataflow workers use public IP addresses.
isOptional: true
parameterType: BOOLEAN
encryption_spec_key_name:
defaultValue: ''
description: Customer-managed encryption key.
isOptional: true
parameterType: STRING
forecast_horizon:
description: The number of time periods into the future for which forecasts
will be created. Future periods start after the latest timestamp for each
time series.
parameterType: NUMBER_INTEGER
location:
description: The GCP region for Vertex AI.
parameterType: STRING
max_num_trials:
defaultValue: 6.0
description: Maximum number of tuning trials to perform per time series.
There are up to 100 possible combinations to explore for each time series.
Recommended values to try are 3, 6, and 24.
isOptional: true
parameterType: NUMBER_INTEGER
optimization_objective:
defaultValue: rmse
description: Optimization objective for tuning. Supported metrics come from
Prophet's performance_metrics function. These are mse, rmse, mae, mape,
mdape, smape, and coverage.
isOptional: true
parameterType: STRING
predefined_split_column:
description: The predefined_split column name.
parameterType: STRING
project:
description: The GCP project that runs the pipeline components.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
source_bigquery_uri:
description: The BigQuery table path of format bq://bq_project.bq_dataset.bq_table
parameterType: STRING
target_column:
description: Name of the column that the model is to predict values for.
parameterType: STRING
time_column:
description: Name of the column that identifies time order in the time series.
parameterType: STRING
time_series_identifier_column:
description: Name of the column that identifies the time series.
parameterType: STRING
window_column:
description: Name of the column that should be used to filter input rows. The
column should contain either booleans or string booleans; if the value
of the row is True, generate a sliding window from that row.
parameterType: STRING
outputDefinitions:
artifacts:
evaluated_examples_directory:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
unmanaged_container_model:
artifactType:
schemaTitle: google.UnmanagedContainerModel
schemaVersion: 0.0.1
description: The UnmanagedContainerModel artifact.
parameters:
gcp_resources:
description: Serialized gcp_resources proto tracking the custom training
job.
parameterType: STRING
comp-table-to-uri:
executorLabel: exec-table-to-uri
inputDefinitions:
artifacts:
table:
artifactType:
schemaTitle: system.Artifact
schemaVersion: 0.0.1
parameters:
use_bq_prefix:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
table_id:
parameterType: STRING
uri:
parameterType: STRING
comp-validate-inputs:
executorLabel: exec-validate-inputs
inputDefinitions:
parameters:
bigquery_destination_uri:
isOptional: true
parameterType: STRING
data_granularity_unit:
isOptional: true
parameterType: STRING
data_source_bigquery_table_path:
isOptional: true
parameterType: STRING
data_source_csv_filenames:
isOptional: true
parameterType: STRING
optimization_objective:
isOptional: true
parameterType: STRING
predefined_split_key:
isOptional: true
parameterType: STRING
source_model_uri:
isOptional: true
parameterType: STRING
target_column:
isOptional: true
parameterType: STRING
test_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
time_column:
isOptional: true
parameterType: STRING
time_series_identifier_column:
isOptional: true
parameterType: STRING
timestamp_split_key:
isOptional: true
parameterType: STRING
training_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
validation_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
window_column:
isOptional: true
parameterType: STRING
window_max_count:
isOptional: true
parameterType: NUMBER_INTEGER
window_stride_length:
isOptional: true
parameterType: NUMBER_INTEGER
comp-wrapped-in-list:
executorLabel: exec-wrapped-in-list
inputDefinitions:
parameters:
value:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: LIST
deploymentSpec:
executors:
exec-bigquery-create-dataset:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_create_dataset
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_create_dataset(\n project: str,\n location: str,\n\
\ dataset: str,\n exists_ok: bool = False,\n) -> NamedTuple('Outputs',\
\ [('project_id', str), ('dataset_id', str)]):\n \"\"\"Creates a BigQuery\
\ dataset.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import collections\n\n from google.cloud import bigquery\n # pylint:\
\ enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n ref\
\ = client.create_dataset(dataset=dataset, exists_ok=exists_ok)\n return\
\ collections.namedtuple('Outputs', ['project_id', 'dataset_id'])(\n \
\ ref.project, ref.dataset_id)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-delete-dataset-with-prefix:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_delete_dataset_with_prefix
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_delete_dataset_with_prefix(\n project: str,\n \
\ dataset_prefix: str,\n delete_contents: bool = False,\n) -> None:\n\
\ \"\"\"Deletes all BigQuery datasets matching the given prefix.\"\"\"\n\
\ # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project)\n for dataset in client.list_datasets(project=project):\n\
\ if dataset.dataset_id.startswith(dataset_prefix):\n client.delete_dataset(\n\
\ dataset=dataset.dataset_id,\n delete_contents=delete_contents)\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-query-job:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-build-job-configuration-query:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-feature-transform-engine:
container:
args:
- feature_transform_engine
- '{"Concat": ["--project=", "{{$.inputs.parameters[''project'']}}"]}'
- '{"Concat": ["--location=", "{{$.inputs.parameters[''location'']}}"]}'
- '{"Concat": ["--dataset_level_custom_transformation_definitions=", "{{$.inputs.parameters[''dataset_level_custom_transformation_definitions'']}}"]}'
- '{"Concat": ["--dataset_level_transformations=", "{{$.inputs.parameters[''dataset_level_transformations'']}}"]}'
- '{"Concat": ["--forecasting_time_column=", "{{$.inputs.parameters[''forecasting_time_column'']}}"]}'
- '{"IfPresent": {"InputName": "forecasting_time_series_identifier_column",
"Then": {"Concat": ["--forecasting_time_series_identifier_column=", "{{$.inputs.parameters[''forecasting_time_series_identifier_column'']}}"]}}}'
- '{"Concat": ["--forecasting_time_series_identifier_columns=", "{{$.inputs.parameters[''forecasting_time_series_identifier_columns'']}}"]}'
- '{"Concat": ["--forecasting_time_series_attribute_columns=", "{{$.inputs.parameters[''forecasting_time_series_attribute_columns'']}}"]}'
- '{"Concat": ["--forecasting_unavailable_at_forecast_columns=", "{{$.inputs.parameters[''forecasting_unavailable_at_forecast_columns'']}}"]}'
- '{"Concat": ["--forecasting_available_at_forecast_columns=", "{{$.inputs.parameters[''forecasting_available_at_forecast_columns'']}}"]}'
- '{"Concat": ["--forecasting_forecast_horizon=", "{{$.inputs.parameters[''forecasting_forecast_horizon'']}}"]}'
- '{"Concat": ["--forecasting_context_window=", "{{$.inputs.parameters[''forecasting_context_window'']}}"]}'
- '{"Concat": ["--forecasting_predefined_window_column=", "{{$.inputs.parameters[''forecasting_predefined_window_column'']}}"]}'
- '{"Concat": ["--forecasting_window_stride_length=", "{{$.inputs.parameters[''forecasting_window_stride_length'']}}"]}'
- '{"Concat": ["--forecasting_window_max_count=", "{{$.inputs.parameters[''forecasting_window_max_count'']}}"]}'
- '{"Concat": ["--forecasting_holiday_regions=", "{{$.inputs.parameters[''forecasting_holiday_regions'']}}"]}'
- '{"Concat": ["--forecasting_apply_windowing=", "{{$.inputs.parameters[''forecasting_apply_windowing'']}}"]}'
- '{"Concat": ["--predefined_split_key=", "{{$.inputs.parameters[''predefined_split_key'']}}"]}'
- '{"Concat": ["--stratified_split_key=", "{{$.inputs.parameters[''stratified_split_key'']}}"]}'
- '{"Concat": ["--timestamp_split_key=", "{{$.inputs.parameters[''timestamp_split_key'']}}"]}'
- '{"Concat": ["--training_fraction=", "{{$.inputs.parameters[''training_fraction'']}}"]}'
- '{"Concat": ["--validation_fraction=", "{{$.inputs.parameters[''validation_fraction'']}}"]}'
- '{"Concat": ["--test_fraction=", "{{$.inputs.parameters[''test_fraction'']}}"]}'
- '{"Concat": ["--stats_gen_execution_engine=", "{{$.inputs.parameters[''stats_gen_execution_engine'']}}"]}'
- '{"Concat": ["--tf_transform_execution_engine=", "{{$.inputs.parameters[''tf_transform_execution_engine'']}}"]}'
- '{"IfPresent": {"InputName": "tf_auto_transform_features", "Then": {"Concat":
["--tf_auto_transform_features=", "{{$.inputs.parameters[''tf_auto_transform_features'']}}"]}}}'
- '{"Concat": ["--tf_custom_transformation_definitions=", "{{$.inputs.parameters[''tf_custom_transformation_definitions'']}}"]}'
- '{"Concat": ["--tf_transformations_path=", "{{$.inputs.parameters[''tf_transformations_path'']}}"]}'
- '{"Concat": ["--legacy_transformations_path=", "{{$.inputs.parameters[''legacy_transformations_path'']}}"]}'
- '{"Concat": ["--data_source_csv_filenames=", "{{$.inputs.parameters[''data_source_csv_filenames'']}}"]}'
- '{"Concat": ["--data_source_bigquery_table_path=", "{{$.inputs.parameters[''data_source_bigquery_table_path'']}}"]}'
- '{"Concat": ["--bigquery_staging_full_dataset_id=", "{{$.inputs.parameters[''bigquery_staging_full_dataset_id'']}}"]}'
- '{"Concat": ["--target_column=", "{{$.inputs.parameters[''target_column'']}}"]}'
- '{"Concat": ["--weight_column=", "{{$.inputs.parameters[''weight_column'']}}"]}'
- '{"Concat": ["--prediction_type=", "{{$.inputs.parameters[''prediction_type'']}}"]}'
- '{"IfPresent": {"InputName": "model_type", "Then": {"Concat": ["--model_type=",
"{{$.inputs.parameters[''model_type'']}}"]}}}'
- '{"Concat": ["--multimodal_tabular_columns=", "{{$.inputs.parameters[''multimodal_tabular_columns'']}}"]}'
- '{"Concat": ["--multimodal_timeseries_columns=", "{{$.inputs.parameters[''multimodal_timeseries_columns'']}}"]}'
- '{"Concat": ["--multimodal_text_columns=", "{{$.inputs.parameters[''multimodal_text_columns'']}}"]}'
- '{"Concat": ["--multimodal_image_columns=", "{{$.inputs.parameters[''multimodal_image_columns'']}}"]}'
- '{"Concat": ["--run_distill=", "{{$.inputs.parameters[''run_distill'']}}"]}'
- '{"Concat": ["--run_feature_selection=", "{{$.inputs.parameters[''run_feature_selection'']}}"]}'
- '{"Concat": ["--materialized_examples_format=", "{{$.inputs.parameters[''materialized_examples_format'']}}"]}'
- '{"Concat": ["--max_selected_features=", "{{$.inputs.parameters[''max_selected_features'']}}"]}'
- '{"Concat": ["--feature_selection_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/feature_selection_staging_dir"]}'
- '{"Concat": ["--feature_selection_algorithm=", "{{$.inputs.parameters[''feature_selection_algorithm'']}}"]}'
- '{"Concat": ["--feature_selection_execution_engine=", "{{$.inputs.parameters[''feature_selection_execution_engine'']}}"]}'
- '{"Concat": ["--feature_ranking_path=", "{{$.outputs.artifacts[''feature_ranking''].uri}}"]}'
- '{"Concat": ["--error_file_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/error.txt"]}'
- '{"Concat": ["--stats_result_path=", "{{$.outputs.artifacts[''dataset_stats''].uri}}"]}'
- '{"Concat": ["--transform_output_artifact_path=", "{{$.outputs.artifacts[''transform_output''].uri}}"]}'
- '{"Concat": ["--transform_output_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/transform"]}'
- '{"Concat": ["--materialized_examples_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/materialized"]}'
- '{"Concat": ["--export_data_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/export"]}'
- '{"Concat": ["--materialized_data_path=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/materialized_data"]}'
- '{"Concat": ["--materialized_data_artifact_path=", "{{$.outputs.artifacts[''materialized_data''].uri}}"]}'
- '{"Concat": ["--bigquery_train_split_uri_path=", "{{$.outputs.parameters[''bigquery_train_split_uri''].output_file}}"]}'
- '{"Concat": ["--bigquery_validation_split_uri_path=", "{{$.outputs.parameters[''bigquery_validation_split_uri''].output_file}}"]}'
- '{"Concat": ["--bigquery_test_split_uri_path=", "{{$.outputs.parameters[''bigquery_test_split_uri''].output_file}}"]}'
- '{"Concat": ["--bigquery_downsampled_test_split_uri_path=", "{{$.outputs.parameters[''bigquery_downsampled_test_split_uri''].output_file}}"]}'
- '{"Concat": ["--split_example_counts_path=", "{{$.outputs.parameters[''split_example_counts''].output_file}}"]}'
- '{"Concat": ["--instance_schema_path=", "{{$.outputs.artifacts[''instance_schema''].path}}"]}'
- '{"Concat": ["--training_schema_path=", "{{$.outputs.artifacts[''training_schema''].path}}"]}'
- --job_name=feature-transform-engine-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- '{"Concat": ["--dataflow_project=", "{{$.inputs.parameters[''project'']}}"]}'
- '{"Concat": ["--dataflow_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging"]}'
- '{"Concat": ["--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp"]}'
- '{"Concat": ["--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}"]}'
- '{"Concat": ["--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}"]}'
- --dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625
- --feature_transform_engine_docker_uri=us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240808_0625
- '{"Concat": ["--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}"]}'
- '{"Concat": ["--dataflow_subnetwork_fully_qualified=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}"]}'
- '{"Concat": ["--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}"]}'
- '{"Concat": ["--dataflow_service_account=", "{{$.inputs.parameters[''dataflow_service_account'']}}"]}'
- '{"Concat": ["--dataflow_kms_key=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}"]}'
- '{"Concat": ["--autodetect_csv_schema=", "{{$.inputs.parameters[''autodetect_csv_schema'']}}"]}'
- '{"Concat": ["--gcp_resources_path=", "{{$.outputs.parameters[''gcp_resources''].output_file}}"]}'
- '{"IfPresent": {"InputName": "group_columns", "Then": {"Concat": ["--group_columns=",
"{{$.inputs.parameters[''group_columns'']}}"]}}}'
- '{"IfPresent": {"InputName": "group_total_weight", "Then": {"Concat": ["--group_total_weight=",
"{{$.inputs.parameters[''group_total_weight'']}}"]}}}'
- '{"IfPresent": {"InputName": "temporal_total_weight", "Then": {"Concat":
["--temporal_total_weight=", "{{$.inputs.parameters[''temporal_total_weight'']}}"]}}}'
- '{"IfPresent": {"InputName": "group_temporal_total_weight", "Then": {"Concat":
["--group_temporal_total_weight=", "{{$.inputs.parameters[''group_temporal_total_weight'']}}"]}}}'
- '{"Concat": ["--encryption_spec_key_name=", "{{$.inputs.parameters[''encryption_spec_key_name'']}}"]}'
image: us-docker.pkg.dev/vertex-ai/automl-tabular/feature-transform-engine:20240808_0625
exec-get-fte-suffix:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_fte_suffix
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_fte_suffix(\n project: str,\n location: str,\n bigquery_staging_full_dataset_id:\
\ str,\n fte_table: str,\n) -> str:\n \"\"\"Infers the FTE suffix from\
\ the intermediate FTE table name.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n for\
\ table in client.list_tables(bigquery_staging_full_dataset_id):\n if\
\ table.table_id.startswith(fte_table):\n return table.table_id[len(fte_table)\
\ + 1:]\n raise ValueError(\n f'No FTE output tables found in {bigquery_staging_full_dataset_id}.')\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-table-location:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_table_location
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_table_location(\n project: str,\n table: Optional[str],\n\
\ default_location: str = '',\n) -> str:\n \"\"\"Returns the region\
\ the given table belongs to.\n\n Args:\n project: The GCP project.\n\
\ table: The BigQuery table to get a location for.\n default_location:\
\ Location to return if no table was given.\n\n Returns:\n A GCP region\
\ or multi-region.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n if not table:\n return default_location\n\n client = bigquery.Client(project=project)\n\
\ if table.startswith('bq://'):\n table = table[len('bq://'):]\n elif\
\ table.startswith('bigquery://'):\n table = table[len('bigquery://'):]\n\
\ return client.get_table(table).location\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-model-evaluation-regression:
container:
args:
- --setup_file
- /setup.py
- --json_mode
- 'true'
- --project_id
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --problem_type
- regression
- --target_field_name
- '{"Concat": ["instance.", "{{$.inputs.parameters[''target_field_name'']}}"]}'
- --batch_prediction_format
- '{{$.inputs.parameters[''predictions_format'']}}'
- '{"IfPresent": {"InputName": "predictions_gcs_source", "Then": ["--batch_prediction_gcs_source",
"{{$.inputs.artifacts[''predictions_gcs_source''].uri}}"]}}'
- '{"IfPresent": {"InputName": "predictions_bigquery_source", "Then": ["--batch_prediction_bigquery_source",
{"Concat": ["bq://", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''projectId'']}}",
".", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''datasetId'']}}",
".", "{{$.inputs.artifacts[''predictions_bigquery_source''].metadata[''tableId'']}}"]}]}}'
- '{"IfPresent": {"InputName": "model", "Then": ["--model_name", "{{$.inputs.artifacts[''model''].metadata[''resourceName'']}}"]}}'
- --ground_truth_format
- '{{$.inputs.parameters[''ground_truth_format'']}}'
- --ground_truth_gcs_source
- '{{$.inputs.parameters[''ground_truth_gcs_source'']}}'
- --ground_truth_bigquery_source
- '{{$.inputs.parameters[''ground_truth_bigquery_source'']}}'
- --root_dir
- '{{$.pipeline_root}}/{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}'
- --prediction_score_column
- '{{$.inputs.parameters[''prediction_score_column'']}}'
- --dataflow_job_prefix
- evaluation-regression-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}
- --dataflow_service_account
- '{{$.inputs.parameters[''dataflow_service_account'']}}'
- --dataflow_disk_size
- '{{$.inputs.parameters[''dataflow_disk_size_gb'']}}'
- --dataflow_machine_type
- '{{$.inputs.parameters[''dataflow_machine_type'']}}'
- --dataflow_workers_num
- '{{$.inputs.parameters[''dataflow_workers_num'']}}'
- --dataflow_max_workers_num
- '{{$.inputs.parameters[''dataflow_max_workers_num'']}}'
- --dataflow_subnetwork
- '{{$.inputs.parameters[''dataflow_subnetwork'']}}'
- --dataflow_use_public_ips
- '{{$.inputs.parameters[''dataflow_use_public_ips'']}}'
- --kms_key_name
- '{{$.inputs.parameters[''encryption_spec_key_name'']}}'
- --force_runner_mode
- '{{$.inputs.parameters[''force_runner_mode'']}}'
- --output_metrics_gcs_path
- '{{$.outputs.artifacts[''evaluation_metrics''].path}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- /main.py
image: gcr.io/ml-pipeline/model-evaluation:v0.9.2
exec-model-upload:
container:
args:
- --type
- UploadModel
- --payload
- '{"Concat": ["{", "\"display_name\": \"", "{{$.inputs.parameters[''display_name'']}}",
"\"", ", \"description\": \"", "{{$.inputs.parameters[''description'']}}",
"\"", ", \"explanation_spec\": {", "\"parameters\": ", "{{$.inputs.parameters[''explanation_parameters'']}}",
", \"metadata\": ", "{{$.inputs.parameters[''explanation_metadata'']}}",
"}", ", \"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", ", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", ", \"pipeline_job\":
\"", "projects/{{$.inputs.parameters[''project'']}}/locations/{{$.inputs.parameters[''location'']}}/pipelineJobs/{{$.pipeline_job_uuid}}",
"\"", "}"]}'
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
- '{"IfPresent": {"InputName": "parent_model", "Then": ["--parent_model_name",
"{{$.inputs.artifacts[''parent_model''].metadata[''resourceName'']}}"]}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.model.upload_model.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-prophet-trainer:
container:
args:
- --type
- CustomJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --payload
- '{"Concat": ["{\"display_name\": \"prophet-trainer-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}\",
", "\"encryption_spec\": {\"kms_key_name\":\"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}, ", "\"job_spec\": {\"worker_pool_specs\": [{\"replica_count\":\"1\",
", "\"machine_spec\": {\"machine_type\": \"n1-standard-4\"}, ", "\"container_spec\":
{\"image_uri\":\"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625\",
", "\"args\": [\"prophet_trainer\", \"", "--job_name=dataflow-{{$.pipeline_job_name}}\",
\"", "--dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625\",
\"", "--prediction_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/fte-prediction-server:20240808_0625\",
\"", "--artifacts_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/model/\",
\"", "--evaluated_examples_dir=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/eval/\", \"", "--region=", "{{$.inputs.parameters[''location'']}}",
"\", \"", "--source_bigquery_uri=", "{{$.inputs.parameters[''source_bigquery_uri'']}}",
"\", \"", "--target_column=", "{{$.inputs.parameters[''target_column'']}}",
"\", \"", "--time_column=", "{{$.inputs.parameters[''time_column'']}}",
"\", \"", "--time_series_identifier_column=", "{{$.inputs.parameters[''time_series_identifier_column'']}}",
"\", \"", "--forecast_horizon=", "{{$.inputs.parameters[''forecast_horizon'']}}",
"\", \"", "--window_column=", "{{$.inputs.parameters[''window_column'']}}",
"\", \"", "--optimization_objective=", "{{$.inputs.parameters[''optimization_objective'']}}",
"\", \"", "--data_granularity_unit=", "{{$.inputs.parameters[''data_granularity_unit'']}}",
"\", \"", "--predefined_split_column=", "{{$.inputs.parameters[''predefined_split_column'']}}",
"\", \"", "--max_num_trials=", "{{$.inputs.parameters[''max_num_trials'']}}",
"\", \"", "--dataflow_project=", "{{$.inputs.parameters[''project'']}}",
"\", \"", "--dataflow_max_num_workers=", "{{$.inputs.parameters[''dataflow_max_num_workers'']}}",
"\", \"", "--dataflow_machine_type=", "{{$.inputs.parameters[''dataflow_machine_type'']}}",
"\", \"", "--dataflow_disk_size_gb=", "{{$.inputs.parameters[''dataflow_disk_size_gb'']}}",
"\", \"", "--dataflow_service_account=", "{{$.inputs.parameters[''dataflow_service_account'']}}",
"\", \"", "--dataflow_subnetwork=", "{{$.inputs.parameters[''dataflow_subnetwork'']}}",
"\", \"", "--dataflow_use_public_ips=", "{{$.inputs.parameters[''dataflow_use_public_ips'']}}",
"\", \"", "--dataflow_staging_dir=", "{{$.inputs.parameters[''root_dir'']}}",
"/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_staging\", \"",
"--dataflow_tmp_dir=", "{{$.inputs.parameters[''root_dir'']}}", "/{{$.pipeline_job_uuid}}/{{$.pipeline_task_uuid}}/dataflow_tmp\",
\"", "--gcp_resources_path=", "{{$.outputs.parameters[''gcp_resources''].output_file}}",
"\", \"", "--executor_input={{$.json_escape[1]}}\"]}}]}}"]}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.custom_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44
exec-table-to-uri:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- table_to_uri
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef table_to_uri(\n table: dsl.Input[dsl.Artifact],\n use_bq_prefix:\
\ bool = False,\n) -> NamedTuple(\n 'Outputs',\n [\n ('project_id',\
\ str),\n ('dataset_id', str),\n ('table_id', str),\n \
\ ('uri', str),\n ],\n):\n \"\"\"Converts a google.BQTable to a URI.\"\
\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import collections\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n outputs = [\n table.metadata['projectId'],\n table.metadata['datasetId'],\n\
\ table.metadata['tableId'],\n ]\n bq_uri = '.'.join(outputs)\n \
\ if use_bq_prefix:\n bq_uri = 'bq://' + bq_uri\n outputs.append(bq_uri)\n\
\ return collections.namedtuple(\n 'Outputs',\n ['project_id',\
\ 'dataset_id', 'table_id', 'uri'],\n )(*outputs)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-validate-inputs:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- validate_inputs
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef validate_inputs(\n time_column: Optional[str] = None,\n \
\ time_series_identifier_column: Optional[str] = None,\n target_column:\
\ Optional[str] = None,\n data_source_bigquery_table_path: Optional[str]\
\ = None,\n training_fraction: Optional[float] = None,\n validation_fraction:\
\ Optional[float] = None,\n test_fraction: Optional[float] = None,\n\
\ predefined_split_key: Optional[str] = None,\n timestamp_split_key:\
\ Optional[str] = None,\n data_source_csv_filenames: Optional[str] =\
\ None,\n source_model_uri: Optional[str] = None,\n bigquery_destination_uri:\
\ Optional[str] = None,\n window_column: Optional[str] = None,\n window_stride_length:\
\ Optional[int] = None,\n window_max_count: Optional[int] = None,\n \
\ optimization_objective: Optional[str] = None,\n data_granularity_unit:\
\ Optional[str] = None,\n) -> None:\n \"\"\"Checks training pipeline input\
\ parameters are valid.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import re\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n project_pattern = r'([a-z0-9.-]+:)?[a-z][a-z0-9-_]{4,28}[a-z0-9]'\n\
\ dataset_pattern = r'[a-zA-Z0-9_]+'\n table_pattern = r'[^\\.\\:`]+'\n\
\ dataset_uri_pattern = re.compile(\n f'(bq://)?{project_pattern}[.:]{dataset_pattern}')\n\
\ table_uri_pattern = re.compile(\n f'(bq://)?{project_pattern}[.:]{dataset_pattern}[.:]{table_pattern}')\n\
\n # Validate BigQuery column and dataset names.\n bigquery_column_parameters\
\ = [\n time_column,\n time_series_identifier_column,\n target_column,\n\
\ ]\n column_pattern = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]{1,300}')\n \
\ for column in bigquery_column_parameters:\n if column and not column_pattern.fullmatch(column):\n\
\ raise ValueError(f'Invalid column name: {column}.')\n if (bigquery_destination_uri\
\ and\n not dataset_uri_pattern.fullmatch(bigquery_destination_uri)):\n\
\ raise ValueError(\n f'Invalid BigQuery dataset URI: {bigquery_destination_uri}.')\n\
\ if (source_model_uri and not table_uri_pattern.fullmatch(source_model_uri)):\n\
\ raise ValueError(f'Invalid BigQuery table URI: {source_model_uri}.')\n\
\n # Validate data source.\n data_source_count = sum([bool(source) for\
\ source in [\n data_source_bigquery_table_path, data_source_csv_filenames]])\n\
\ if data_source_count > 1:\n raise ValueError(f'Expected 1 data source,\
\ found {data_source_count}.')\n if (data_source_bigquery_table_path\n\
\ and not table_uri_pattern.fullmatch(data_source_bigquery_table_path)):\n\
\ raise ValueError(\n f'Invalid BigQuery table URI: {data_source_bigquery_table_path}.')\n\
\ gcs_path_pattern = re.compile(r'gs:\\/\\/(.+)\\/([^\\/]+)')\n if data_source_csv_filenames:\n\
\ csv_list = [filename.strip()\n for filename in data_source_csv_filenames.split(',')]\n\
\ for gcs_path in csv_list:\n if not gcs_path_pattern.fullmatch(gcs_path):\n\
\ raise ValueError(f'Invalid path to CSV stored in GCS: {gcs_path}.')\n\
\n # Validate split spec.\n fraction_splits = [\n training_fraction,\n\
\ validation_fraction,\n test_fraction,\n ]\n fraction_splits\
\ = [None if fraction == -1 else fraction\n for fraction\
\ in fraction_splits]\n split_count = sum([\n bool(source)\n \
\ for source in [predefined_split_key,\n any(fraction_splits)]\n\
\ ])\n if split_count > 1:\n raise ValueError(f'Expected 1 split type,\
\ found {split_count}.')\n if (predefined_split_key and\n not column_pattern.fullmatch(predefined_split_key)):\n\
\ raise ValueError(f'Invalid column name: {predefined_split_key}.')\n\
\ if any(fraction_splits):\n if not all(fraction_splits):\n raise\
\ ValueError(\n f'All fractions must be non-zero. Got: {fraction_splits}.')\n\
\ if sum(fraction_splits) != 1:\n raise ValueError(\n f'Fraction\
\ splits must sum to 1. Got: {sum(fraction_splits)}.')\n if (timestamp_split_key\
\ and\n not column_pattern.fullmatch(timestamp_split_key)):\n raise\
\ ValueError(f'Invalid column name: {timestamp_split_key}.')\n if timestamp_split_key\
\ and not all(fraction_splits):\n raise ValueError('All fractions must\
\ be non-zero for timestamp split.')\n\n # Validate window config.\n if\
\ window_stride_length == -1:\n window_stride_length = None\n if window_max_count\
\ == -1:\n window_max_count = None\n window_configs = [window_column,\
\ window_stride_length, window_max_count]\n window_config_count = sum([bool(config)\
\ for config in window_configs])\n if window_config_count > 1:\n raise\
\ ValueError(f'Expected 1 window config, found {window_config_count}.')\n\
\ if window_column and not column_pattern.fullmatch(window_column):\n \
\ raise ValueError(f'Invalid column name: {window_column}.')\n if window_stride_length\
\ and (window_stride_length < 1 or\n window_stride_length\
\ > 1000):\n raise ValueError('Stride must be between 1 and 1000. Got:\
\ '\n f'{window_stride_length}.')\n if window_max_count\
\ and (window_max_count < 1000 or\n window_max_count\
\ > int(1e8)):\n raise ValueError('Max count must be between 1000 and\
\ 100000000. Got: '\n f'{window_max_count}.')\n\n #\
\ Validate eval metric.\n valid_optimization_objectives = ['rmse', 'mae',\
\ 'rmsle']\n if optimization_objective:\n if optimization_objective\
\ not in valid_optimization_objectives:\n raise ValueError(\n \
\ 'Optimization objective should be one of the following: '\n \
\ f'{valid_optimization_objectives}, got: {optimization_objective}.')\n\
\n # Validate data granularity unit.\n valid_data_granularity_units =\
\ [\n 'minute', 'hour', 'day', 'week', 'month', 'year']\n if data_granularity_unit:\n\
\ if data_granularity_unit not in valid_data_granularity_units:\n \
\ raise ValueError(\n 'Granularity unit should be one of the\
\ following: '\n f'{valid_data_granularity_units}, got: {data_granularity_unit}.')\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-wrapped-in-list:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- wrapped_in_list
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef wrapped_in_list(value: str) -> List[str]:\n \"\"\"Wraps a string\
\ in a list.\"\"\"\n return [value]\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
pipelineInfo:
description: Trains one Prophet model per time series.
name: prophet-train
root:
dag:
tasks:
bigquery-delete-dataset-with-prefix:
cachingOptions: {}
componentRef:
name: comp-bigquery-delete-dataset-with-prefix
dependentTasks:
- exit-handler-1
inputs:
parameters:
dataset_prefix:
runtimeValue:
constant: tmp_{{$.pipeline_job_uuid}}
delete_contents:
runtimeValue:
constant: true
project:
componentInputParameter: project
taskInfo:
name: delete-tmp-dataset
triggerPolicy:
strategy: ALL_UPSTREAM_TASKS_COMPLETED
exit-handler-1:
componentRef:
name: comp-exit-handler-1
inputs:
parameters:
pipelinechannel--data_granularity_unit:
componentInputParameter: data_granularity_unit
pipelinechannel--data_source_bigquery_table_path:
componentInputParameter: data_source_bigquery_table_path
pipelinechannel--data_source_csv_filenames:
componentInputParameter: data_source_csv_filenames
pipelinechannel--dataflow_service_account:
componentInputParameter: dataflow_service_account
pipelinechannel--dataflow_subnetwork:
componentInputParameter: dataflow_subnetwork
pipelinechannel--dataflow_use_public_ips:
componentInputParameter: dataflow_use_public_ips
pipelinechannel--encryption_spec_key_name:
componentInputParameter: encryption_spec_key_name
pipelinechannel--evaluation_dataflow_disk_size_gb:
componentInputParameter: evaluation_dataflow_disk_size_gb
pipelinechannel--evaluation_dataflow_machine_type:
componentInputParameter: evaluation_dataflow_machine_type
pipelinechannel--evaluation_dataflow_max_num_workers:
componentInputParameter: evaluation_dataflow_max_num_workers
pipelinechannel--forecast_horizon:
componentInputParameter: forecast_horizon
pipelinechannel--location:
componentInputParameter: location
pipelinechannel--max_num_trials:
componentInputParameter: max_num_trials
pipelinechannel--optimization_objective:
componentInputParameter: optimization_objective
pipelinechannel--predefined_split_key:
componentInputParameter: predefined_split_key
pipelinechannel--project:
componentInputParameter: project
pipelinechannel--root_dir:
componentInputParameter: root_dir
pipelinechannel--run_evaluation:
componentInputParameter: run_evaluation
pipelinechannel--target_column:
componentInputParameter: target_column
pipelinechannel--test_fraction:
componentInputParameter: test_fraction
pipelinechannel--time_column:
componentInputParameter: time_column
pipelinechannel--time_series_identifier_column:
componentInputParameter: time_series_identifier_column
pipelinechannel--timestamp_split_key:
componentInputParameter: timestamp_split_key
pipelinechannel--trainer_dataflow_disk_size_gb:
componentInputParameter: trainer_dataflow_disk_size_gb
pipelinechannel--trainer_dataflow_machine_type:
componentInputParameter: trainer_dataflow_machine_type
pipelinechannel--trainer_dataflow_max_num_workers:
componentInputParameter: trainer_dataflow_max_num_workers
pipelinechannel--training_fraction:
componentInputParameter: training_fraction
pipelinechannel--validation_fraction:
componentInputParameter: validation_fraction
pipelinechannel--window_column:
componentInputParameter: window_column
pipelinechannel--window_max_count:
componentInputParameter: window_max_count
pipelinechannel--window_stride_length:
componentInputParameter: window_stride_length
taskInfo:
name: exit-handler-1
inputDefinitions:
parameters:
data_granularity_unit:
description: 'String representing the units of time for the time
column.'
parameterType: STRING
data_source_bigquery_table_path:
defaultValue: ''
description: 'The BigQuery table path of format
bq://bq_project.bq_dataset.bq_table'
isOptional: true
parameterType: STRING
data_source_csv_filenames:
defaultValue: ''
description: 'A string that represents a list of comma
separated CSV filenames.'
isOptional: true
parameterType: STRING
dataflow_service_account:
defaultValue: ''
description: Custom service account to run dataflow jobs.
isOptional: true
parameterType: STRING
dataflow_subnetwork:
defaultValue: ''
description: 'Dataflow''s fully qualified subnetwork name; when empty,
      the default subnetwork will be used.'
isOptional: true
parameterType: STRING
dataflow_use_public_ips:
defaultValue: true
description: 'Specifies whether Dataflow workers use public IP
addresses.'
isOptional: true
parameterType: BOOLEAN
encryption_spec_key_name:
defaultValue: ''
description: The KMS key name.
isOptional: true
parameterType: STRING
evaluation_dataflow_disk_size_gb:
defaultValue: 40.0
description: 'Dataflow worker''s disk size in GB during
evaluation.'
isOptional: true
parameterType: NUMBER_INTEGER
evaluation_dataflow_machine_type:
defaultValue: n1-standard-1
description: 'The dataflow machine type used for
evaluation.'
isOptional: true
parameterType: STRING
evaluation_dataflow_max_num_workers:
defaultValue: 10.0
description: 'The max number of Dataflow workers used
for evaluation.'
isOptional: true
parameterType: NUMBER_INTEGER
forecast_horizon:
description: 'The number of time periods into the future for which
forecasts will be created. Future periods start after the latest timestamp
for each time series.'
parameterType: NUMBER_INTEGER
location:
description: The GCP region for Vertex AI.
parameterType: STRING
max_num_trials:
defaultValue: 6.0
description: 'Maximum number of tuning trials to perform per time series.
There are up to 100 possible combinations to explore for each time series.
Recommended values to try are 3, 6, and 24.'
isOptional: true
parameterType: NUMBER_INTEGER
optimization_objective:
description: Optimization objective for the model.
parameterType: STRING
predefined_split_key:
defaultValue: ''
description: The predefined_split column name.
isOptional: true
parameterType: STRING
project:
description: The GCP project that runs the pipeline components.
parameterType: STRING
root_dir:
description: The Cloud Storage location to store the output.
parameterType: STRING
run_evaluation:
defaultValue: true
description: Whether to run evaluation steps during training.
isOptional: true
parameterType: BOOLEAN
target_column:
description: Name of the column that the model is to predict values for.
parameterType: STRING
test_fraction:
defaultValue: -1.0
description: The test fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
time_column:
description: 'Name of the column that identifies time order in the time
series.'
parameterType: STRING
time_series_identifier_column:
description: 'Name of the column that identifies the time
series.'
parameterType: STRING
timestamp_split_key:
defaultValue: ''
description: The timestamp_split column name.
isOptional: true
parameterType: STRING
trainer_dataflow_disk_size_gb:
defaultValue: 40.0
description: 'Dataflow worker''s disk size in GB during
training.'
isOptional: true
parameterType: NUMBER_INTEGER
trainer_dataflow_machine_type:
defaultValue: n1-standard-1
description: The dataflow machine type used for training.
isOptional: true
parameterType: STRING
trainer_dataflow_max_num_workers:
defaultValue: 10.0
description: 'The max number of Dataflow workers used
for training.'
isOptional: true
parameterType: NUMBER_INTEGER
training_fraction:
defaultValue: -1.0
description: The training fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
validation_fraction:
defaultValue: -1.0
description: The validation fraction.
isOptional: true
parameterType: NUMBER_DOUBLE
window_column:
defaultValue: ''
description: 'Name of the column that should be used to filter input rows.
The column should contain either booleans or string booleans; if the value
of the row is True, generate a sliding window from that row.'
isOptional: true
parameterType: STRING
window_max_count:
defaultValue: -1.0
description: 'Number of rows that should be used to generate input
examples. If the total row count is larger than this number, the input
data will be randomly sampled to hit the count.'
isOptional: true
parameterType: NUMBER_INTEGER
window_stride_length:
defaultValue: -1.0
description: 'Step length used to generate input examples. Every
window_stride_length rows will be used to generate a sliding window.'
isOptional: true
parameterType: NUMBER_INTEGER
schemaVersion: 2.1.0
sdkVersion: kfp-2.0.0-rc.2
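# --- Usage note (comments only; not part of the compiled spec) ---
# A minimal submission sketch, assuming this spec is saved locally as
# `prophet_train.yaml`, that the google-cloud-aiplatform SDK is installed, and
# that the project, bucket, table, and column names below are placeholders:
#
#   from google.cloud import aiplatform
#
#   aiplatform.init(project='my-project', location='us-central1')
#   job = aiplatform.PipelineJob(
#       display_name='prophet-train',
#       template_path='prophet_train.yaml',
#       pipeline_root='gs://my-bucket/pipeline_root',
#       parameter_values={
#           'project': 'my-project',
#           'location': 'us-central1',
#           'root_dir': 'gs://my-bucket/prophet',
#           'time_column': 'ds',
#           'time_series_identifier_column': 'series_id',
#           'target_column': 'y',
#           'forecast_horizon': 30,
#           'data_granularity_unit': 'day',
#           'optimization_objective': 'rmse',
#           'data_source_bigquery_table_path': 'bq://my-project.my_dataset.my_table',
#       },
#   )
#   job.run()  # job.submit() returns without waiting for completion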
| 835 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/forecasting/prophet_trainer.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prophet trainer component spec."""
from typing import Optional
from google_cloud_pipeline_components.types.artifact_types import UnmanagedContainerModel
from kfp import dsl
from kfp.dsl import Artifact
from kfp.dsl import Output
# pylint: disable=g-doc-args,unused-argument
@dsl.container_component
def prophet_trainer(
project: str,
location: str,
root_dir: str,
target_column: str,
time_column: str,
time_series_identifier_column: str,
forecast_horizon: int,
window_column: str,
data_granularity_unit: str,
predefined_split_column: str,
source_bigquery_uri: str,
gcp_resources: dsl.OutputPath(str),
unmanaged_container_model: Output[UnmanagedContainerModel],
evaluated_examples_directory: Output[Artifact],
optimization_objective: Optional[str] = 'rmse',
max_num_trials: Optional[int] = 6,
encryption_spec_key_name: Optional[str] = '',
dataflow_max_num_workers: Optional[int] = 10,
dataflow_machine_type: Optional[str] = 'n1-standard-1',
dataflow_disk_size_gb: Optional[int] = 40,
dataflow_service_account: Optional[str] = '',
dataflow_subnetwork: Optional[str] = '',
dataflow_use_public_ips: Optional[bool] = True,
):
# fmt: off
"""Trains and tunes one Prophet model per time series using Dataflow.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region for Vertex AI.
root_dir: The Cloud Storage location to store the output.
time_column: Name of the column that identifies time order in the time series.
time_series_identifier_column: Name of the column that identifies the time series.
target_column: Name of the column that the model is to predict values for.
forecast_horizon: The number of time periods into the future for which forecasts will be created. Future periods start after the latest timestamp for each time series.
optimization_objective: Optimization objective for tuning. Supported metrics come from Prophet's performance_metrics function. These are mse, rmse, mae, mape, mdape, smape, and coverage.
data_granularity_unit: String representing the units of time for the time column.
predefined_split_column: The predefined_split column name.
source_bigquery_uri: The BigQuery table path of format bq://bq_project.bq_dataset.bq_table.
window_column: Name of the column that should be used to filter input rows. The column should contain either booleans or string booleans; if the value of the row is True, generate a sliding window from that row.
max_num_trials: Maximum number of tuning trials to perform per time series. There are up to 100 possible combinations to explore for each time series. Recommended values to try are 3, 6, and 24.
encryption_spec_key_name: Customer-managed encryption key.
dataflow_machine_type: The dataflow machine type used for training.
dataflow_max_num_workers: The max number of Dataflow workers used for training.
dataflow_disk_size_gb: Dataflow worker's disk size in GB during training.
dataflow_service_account: Custom service account to run dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used.
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
Returns:
gcp_resources: Serialized gcp_resources proto tracking the custom training job.
unmanaged_container_model: The UnmanagedContainerModel artifact.
"""
# fmt: on
return dsl.ContainerSpec(
image='gcr.io/ml-pipeline/google-cloud-pipeline-components:1.0.44',
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.custom_job.launcher',
],
args=[
'--type',
'CustomJob',
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--payload',
dsl.ConcatPlaceholder(
items=[
'{"display_name": '
+ f'"prophet-trainer-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}", ',
'"encryption_spec": {"kms_key_name":"',
encryption_spec_key_name,
'"}, ',
'"job_spec": {"worker_pool_specs": [{"replica_count":"1", ',
'"machine_spec": {"machine_type": "n1-standard-4"}, ',
(
'"container_spec":'
' {"image_uri":"us-docker.pkg.dev/vertex-ai-restricted/automl-tabular/training:20240808_0625", '
),
'"args": ["prophet_trainer", "',
(
f'--job_name=dataflow-{dsl.PIPELINE_JOB_NAME_PLACEHOLDER}", "'
),
(
'--dataflow_worker_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/dataflow-worker:20240808_0625", "'
),
(
'--prediction_container_image=us-docker.pkg.dev/vertex-ai/automl-tabular/fte-prediction-server:20240808_0625", "'
),
'--artifacts_dir=',
root_dir,
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/model/", "',
'--evaluated_examples_dir=',
root_dir,
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/eval/", "',
'--region=',
location,
'", "',
'--source_bigquery_uri=',
source_bigquery_uri,
'", "',
'--target_column=',
target_column,
'", "',
'--time_column=',
time_column,
'", "',
'--time_series_identifier_column=',
time_series_identifier_column,
'", "',
'--forecast_horizon=',
forecast_horizon,
'", "',
'--window_column=',
window_column,
'", "',
'--optimization_objective=',
optimization_objective,
'", "',
'--data_granularity_unit=',
data_granularity_unit,
'", "',
'--predefined_split_column=',
predefined_split_column,
'", "',
'--max_num_trials=',
max_num_trials,
'", "',
'--dataflow_project=',
project,
'", "',
'--dataflow_max_num_workers=',
dataflow_max_num_workers,
'", "',
'--dataflow_machine_type=',
dataflow_machine_type,
'", "',
'--dataflow_disk_size_gb=',
dataflow_disk_size_gb,
'", "',
'--dataflow_service_account=',
dataflow_service_account,
'", "',
'--dataflow_subnetwork=',
dataflow_subnetwork,
'", "',
'--dataflow_use_public_ips=',
dataflow_use_public_ips,
'", "',
'--dataflow_staging_dir=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/dataflow_staging", "'
),
'--dataflow_tmp_dir=',
root_dir,
(
f'/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}/{dsl.PIPELINE_TASK_ID_PLACEHOLDER}/dataflow_tmp", "'
),
'--gcp_resources_path=',
gcp_resources,
'", "',
'--executor_input={{$.json_escape[1]}}"]}}]}}',
]
),
],
)
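# Usage sketch (comments only, so the module's import-time behavior is
# unchanged). All argument values below are hypothetical placeholders; in the
# shipped pipelines this component is wired up via
# get_prophet_train_pipeline_and_parameters rather than called directly:
#
#   from kfp import dsl
#   from google_cloud_pipeline_components.v1.automl.forecasting import ProphetTrainerOp
#
#   @dsl.pipeline(name='prophet-trainer-demo')
#   def demo_pipeline():
#       trainer = ProphetTrainerOp(
#           project='my-project',
#           location='us-central1',
#           root_dir='gs://my-bucket/prophet',
#           target_column='y',
#           time_column='ds',
#           time_series_identifier_column='series_id',
#           forecast_horizon=30,
#           window_column='window',
#           data_granularity_unit='day',
#           predefined_split_column='split',
#           source_bigquery_uri='bq://my-project.my_dataset.my_table',
#       )
#       # trainer.outputs['unmanaged_container_model'] can then feed a model
#       # upload step, mirroring the compiled prophet-train pipeline above.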
| 836 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/forecasting/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GA AutoML forecasting components."""
from google_cloud_pipeline_components.v1.automl.forecasting.prophet_trainer import prophet_trainer as ProphetTrainerOp
from google_cloud_pipeline_components.v1.automl.forecasting.utils import get_bqml_arima_predict_pipeline_and_parameters
from google_cloud_pipeline_components.v1.automl.forecasting.utils import get_bqml_arima_train_pipeline_and_parameters
from google_cloud_pipeline_components.v1.automl.forecasting.utils import get_prophet_prediction_pipeline_and_parameters
from google_cloud_pipeline_components.v1.automl.forecasting.utils import get_prophet_train_pipeline_and_parameters
__all__ = [
'ProphetTrainerOp',
'get_bqml_arima_predict_pipeline_and_parameters',
'get_bqml_arima_train_pipeline_and_parameters',
'get_prophet_prediction_pipeline_and_parameters',
'get_prophet_train_pipeline_and_parameters',
]
| 837 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/forecasting/utils.py | """Util functions for Vertex Forecasting pipelines."""
import os
import pathlib
from typing import Any, Dict, Tuple
_GCPC_FORECASTING_PATH = pathlib.Path(__file__).parent.resolve()
def get_bqml_arima_train_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
time_column: str,
time_series_identifier_column: str,
target_column: str,
forecast_horizon: int,
data_granularity_unit: str,
predefined_split_key: str = '',
timestamp_split_key: str = '',
training_fraction: float = -1.0,
validation_fraction: float = -1.0,
test_fraction: float = -1.0,
data_source_csv_filenames: str = '',
data_source_bigquery_table_path: str = '',
window_column: str = '',
window_stride_length: int = -1,
window_max_count: int = -1,
bigquery_destination_uri: str = '',
override_destination: bool = False,
max_order: int = 5,
run_evaluation: bool = True,
) -> Tuple[str, Dict[str, Any]]:
# fmt: off
"""Get the BQML ARIMA_PLUS training pipeline.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region for Vertex AI.
root_dir: The Cloud Storage location to store the output.
time_column: Name of the column that identifies time order in the time series.
time_series_identifier_column: Name of the column that identifies the time series.
target_column: Name of the column that the model is to predict values for.
forecast_horizon: The number of time periods into the future for which forecasts will be created. Future periods start after the latest timestamp for each time series.
data_granularity_unit: The data granularity unit. Accepted values are: minute, hour, day, week, month, year.
predefined_split_key: The predefined_split column name.
timestamp_split_key: The timestamp_split column name.
training_fraction: The training fraction.
validation_fraction: The validation fraction.
    test_fraction: The test fraction.
data_source_csv_filenames: A string that represents a list of comma separated CSV filenames.
data_source_bigquery_table_path: The BigQuery table path of format: `bq://bq_project.bq_dataset.bq_table`.
window_column: Name of the column that should be used to filter input rows. The column should contain either booleans or string booleans; if the value of the row is True, generate a sliding window from that row.
window_stride_length: Step length used to generate input examples. Every window_stride_length rows will be used to generate a sliding window.
window_max_count: Number of rows that should be used to generate input examples. If the total row count is larger than this number, the input data will be randomly sampled to hit the count.
bigquery_destination_uri: URI of the desired destination dataset. If not specified, resources will be created under a new dataset in the project. Unlike in Vertex Forecasting, all resources will be given hardcoded names under this dataset, and the model artifact will also be exported here.
override_destination: Whether to overwrite the metrics and evaluated examples tables if they already exist. If this is False and the tables exist, this pipeline will fail.
max_order: Integer between 1 and 5 representing the size of the parameter search space for ARIMA_PLUS. 5 would result in the highest accuracy model, but also the longest training runtime.
run_evaluation: Whether to run evaluation steps during training.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
# fmt: on
parameter_values = {
'project': project,
'location': location,
'root_dir': root_dir,
'time_column': time_column,
'time_series_identifier_column': time_series_identifier_column,
'target_column': target_column,
'forecast_horizon': forecast_horizon,
'data_granularity_unit': data_granularity_unit,
'predefined_split_key': predefined_split_key,
'timestamp_split_key': timestamp_split_key,
'training_fraction': training_fraction,
'validation_fraction': validation_fraction,
'test_fraction': test_fraction,
'data_source_csv_filenames': data_source_csv_filenames,
'data_source_bigquery_table_path': data_source_bigquery_table_path,
'window_column': window_column,
'window_stride_length': window_stride_length,
'window_max_count': window_max_count,
'bigquery_destination_uri': bigquery_destination_uri,
'override_destination': override_destination,
'max_order': max_order,
'run_evaluation': run_evaluation,
}
pipeline_definition_path = os.path.join(
_GCPC_FORECASTING_PATH, 'bqml_arima_train_pipeline.yaml'
)
return pipeline_definition_path, parameter_values
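# --- Illustrative usage sketch (not part of the library) ---------------------
# The helper below is a hypothetical example of how the (template_path,
# parameter_values) pair returned above might be submitted as a Vertex AI
# pipeline run. The project ID, bucket, dataset, and column names are
# placeholders, and the `google-cloud-aiplatform` SDK is assumed to be
# installed; consult the official docs for the authoritative workflow.
def _example_submit_bqml_arima_train_pipeline() -> None:
  """Hypothetical example of launching the BQML ARIMA_PLUS training pipeline."""
  from google.cloud import aiplatform  # Assumed to be available at call time.
  template_path, parameter_values = get_bqml_arima_train_pipeline_and_parameters(
      project='my-project',  # Placeholder project ID.
      location='us-central1',
      root_dir='gs://my-bucket/pipeline_root',  # Placeholder Cloud Storage path.
      time_column='ds',
      time_series_identifier_column='store_id',
      target_column='sales',
      forecast_horizon=30,
      data_granularity_unit='day',
      data_source_bigquery_table_path='bq://my-project.my_dataset.sales_history',
  )
  job = aiplatform.PipelineJob(
      display_name='bqml-arima-train',
      template_path=template_path,
      parameter_values=parameter_values,
      pipeline_root='gs://my-bucket/pipeline_root',
  )
  job.run()  # Blocks until the pipeline finishes; use submit() to return early.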
def get_bqml_arima_predict_pipeline_and_parameters(
project: str,
location: str,
model_name: str,
data_source_csv_filenames: str = '',
data_source_bigquery_table_path: str = '',
bigquery_destination_uri: str = '',
generate_explanation: bool = False,
) -> Tuple[str, Dict[str, Any]]:
# fmt: off
"""Get the BQML ARIMA_PLUS prediction pipeline.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region for Vertex AI.
model_name: ARIMA_PLUS BQML model URI.
data_source_csv_filenames: A string that represents a list of comma separated CSV filenames.
data_source_bigquery_table_path: The BigQuery table path of format: `bq://bq_project.bq_dataset.bq_table`.
bigquery_destination_uri: URI of the desired destination dataset. If not specified, a resource will be created under a new dataset in the project.
generate_explanation: Generate explanation along with the batch prediction results. This will cause the batch prediction output to include explanations.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
# fmt: on
parameter_values = {
'project': project,
'location': location,
'model_name': model_name,
'data_source_csv_filenames': data_source_csv_filenames,
'data_source_bigquery_table_path': data_source_bigquery_table_path,
'bigquery_destination_uri': bigquery_destination_uri,
'generate_explanation': generate_explanation,
}
pipeline_definition_path = os.path.join(
_GCPC_FORECASTING_PATH, 'bqml_arima_predict_pipeline.yaml'
)
return pipeline_definition_path, parameter_values
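# Hypothetical usage sketch, analogous to the training example above: the
# returned template path and parameters are handed to a Vertex AI PipelineJob.
# All resource names below are placeholders.
def _example_submit_bqml_arima_predict_pipeline() -> None:
  """Hypothetical example of launching the BQML ARIMA_PLUS prediction pipeline."""
  from google.cloud import aiplatform  # Assumed to be available at call time.
  template_path, parameter_values = get_bqml_arima_predict_pipeline_and_parameters(
      project='my-project',  # Placeholder project ID.
      location='us-central1',
      model_name='my-project.my_dataset.my_arima_plus_model',  # Placeholder BQML model.
      data_source_bigquery_table_path='bq://my-project.my_dataset.future_rows',
  )
  aiplatform.PipelineJob(
      display_name='bqml-arima-predict',
      template_path=template_path,
      parameter_values=parameter_values,
      pipeline_root='gs://my-bucket/pipeline_root',  # Placeholder staging location.
  ).run()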
def get_prophet_train_pipeline_and_parameters(
project: str,
location: str,
root_dir: str,
time_column: str,
time_series_identifier_column: str,
target_column: str,
forecast_horizon: int,
optimization_objective: str,
data_granularity_unit: str,
predefined_split_key: str = '',
timestamp_split_key: str = '',
training_fraction: float = -1.0,
validation_fraction: float = -1.0,
test_fraction: float = -1.0,
data_source_csv_filenames: str = '',
data_source_bigquery_table_path: str = '',
window_column: str = '',
window_stride_length: int = -1,
window_max_count: int = -1,
max_num_trials: int = 6,
trainer_dataflow_machine_type: str = 'n1-standard-1',
trainer_dataflow_max_num_workers: int = 10,
trainer_dataflow_disk_size_gb: int = 40,
evaluation_dataflow_machine_type: str = 'n1-standard-1',
evaluation_dataflow_max_num_workers: int = 10,
evaluation_dataflow_disk_size_gb: int = 40,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
run_evaluation: bool = True,
) -> Tuple[str, Dict[str, Any]]:
# fmt: off
"""Returns Prophet train pipeline and formatted parameters.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region for Vertex AI.
root_dir: The Cloud Storage location to store the output.
time_column: Name of the column that identifies time order in the time series.
time_series_identifier_column: Name of the column that identifies the time series.
target_column: Name of the column that the model is to predict values for.
forecast_horizon: The number of time periods into the future for which forecasts will be created. Future periods start after the latest timestamp for each time series.
optimization_objective: Optimization objective for the model.
data_granularity_unit: String representing the units of time for the time column.
predefined_split_key: The predefined_split column name.
timestamp_split_key: The timestamp_split column name.
training_fraction: The training fraction.
validation_fraction: The validation fraction.
    test_fraction: The test fraction.
data_source_csv_filenames: A string that represents a list of comma separated CSV filenames.
data_source_bigquery_table_path: The BigQuery table path of format: `bq://bq_project.bq_dataset.bq_table`.
window_column: Name of the column that should be used to filter input rows. The column should contain either booleans or string booleans; if the value of the row is True, generate a sliding window from that row.
window_stride_length: Step length used to generate input examples. Every window_stride_length rows will be used to generate a sliding window.
window_max_count: Number of rows that should be used to generate input examples. If the total row count is larger than this number, the input data will be randomly sampled to hit the count.
max_num_trials: Maximum number of tuning trials to perform per time series.
    trainer_dataflow_machine_type: The Dataflow machine type used for training.
trainer_dataflow_max_num_workers: The max number of Dataflow workers used for training.
trainer_dataflow_disk_size_gb: Dataflow worker's disk size in GB during training.
    evaluation_dataflow_machine_type: The Dataflow machine type used for evaluation.
evaluation_dataflow_max_num_workers: The max number of Dataflow workers used for evaluation.
evaluation_dataflow_disk_size_gb: Dataflow worker's disk size in GB during evaluation.
    dataflow_service_account: Custom service account to run Dataflow jobs.
    dataflow_subnetwork: Dataflow's fully qualified subnetwork name; when empty, the default subnetwork will be used.
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
run_evaluation: Whether to run evaluation steps during training.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
# fmt: on
parameter_values = {
'project': project,
'location': location,
'root_dir': root_dir,
'time_column': time_column,
'time_series_identifier_column': time_series_identifier_column,
'target_column': target_column,
'forecast_horizon': forecast_horizon,
'predefined_split_key': predefined_split_key,
'timestamp_split_key': timestamp_split_key,
'training_fraction': training_fraction,
'validation_fraction': validation_fraction,
'test_fraction': test_fraction,
'data_source_csv_filenames': data_source_csv_filenames,
'data_source_bigquery_table_path': data_source_bigquery_table_path,
'window_column': window_column,
'window_stride_length': window_stride_length,
'window_max_count': window_max_count,
'max_num_trials': max_num_trials,
'optimization_objective': optimization_objective,
'data_granularity_unit': data_granularity_unit,
'trainer_dataflow_machine_type': trainer_dataflow_machine_type,
'trainer_dataflow_max_num_workers': trainer_dataflow_max_num_workers,
'trainer_dataflow_disk_size_gb': trainer_dataflow_disk_size_gb,
'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,
'evaluation_dataflow_max_num_workers': (
evaluation_dataflow_max_num_workers
),
'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,
'dataflow_service_account': dataflow_service_account,
'dataflow_subnetwork': dataflow_subnetwork,
'dataflow_use_public_ips': dataflow_use_public_ips,
'run_evaluation': run_evaluation,
}
pipeline_definition_path = os.path.join(
_GCPC_FORECASTING_PATH, 'prophet_trainer_pipeline.yaml'
)
return pipeline_definition_path, parameter_values
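# Hypothetical usage sketch (placeholders only): besides the data arguments,
# the Prophet training pipeline exposes Dataflow sizing knobs, which can be
# tuned for larger datasets before the run is submitted with the Vertex AI SDK.
def _example_submit_prophet_train_pipeline() -> None:
  """Hypothetical example of launching the Prophet training pipeline."""
  from google.cloud import aiplatform  # Assumed to be available at call time.
  template_path, parameter_values = get_prophet_train_pipeline_and_parameters(
      project='my-project',  # Placeholder project ID.
      location='us-central1',
      root_dir='gs://my-bucket/pipeline_root',  # Placeholder Cloud Storage path.
      time_column='ds',
      time_series_identifier_column='store_id',
      target_column='sales',
      forecast_horizon=30,
      optimization_objective='rmse',
      data_granularity_unit='day',
      data_source_bigquery_table_path='bq://my-project.my_dataset.sales_history',
      trainer_dataflow_machine_type='n1-standard-4',  # Larger workers for bigger datasets.
      trainer_dataflow_max_num_workers=20,
  )
  aiplatform.PipelineJob(
      display_name='prophet-train',
      template_path=template_path,
      parameter_values=parameter_values,
      pipeline_root='gs://my-bucket/pipeline_root',
  ).run()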
def get_prophet_prediction_pipeline_and_parameters(
project: str,
location: str,
model_name: str,
time_column: str,
time_series_identifier_column: str,
target_column: str,
data_source_csv_filenames: str = '',
data_source_bigquery_table_path: str = '',
bigquery_destination_uri: str = '',
machine_type: str = 'n1-standard-2',
max_num_workers: int = 10,
) -> Tuple[str, Dict[str, Any]]:
# fmt: off
"""Returns Prophet prediction pipeline and formatted parameters.
Unlike the prediction server for Vertex Forecasting, the Prophet prediction
server returns predictions batched by time series id. This pipeline shows how
these predictions can be disaggregated to get results similar to what Vertex
Forecasting provides.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region for Vertex AI.
model_name: The name of the Model resource, in a form of `projects/{project}/locations/{location}/models/{model}`.
time_column: Name of the column that identifies time order in the time series.
time_series_identifier_column: Name of the column that identifies the time series.
target_column: Name of the column that the model is to predict values for.
data_source_csv_filenames: A string that represents a list of comma separated CSV filenames.
data_source_bigquery_table_path: The BigQuery table path of format: `bq://bq_project.bq_dataset.bq_table`.
bigquery_destination_uri: URI of the desired destination dataset. If not specified, resources will be created under a new dataset in the project.
machine_type: The machine type used for batch prediction.
max_num_workers: The max number of workers used for batch prediction.
Returns:
Tuple of pipeline_definition_path and parameter_values.
"""
# fmt: on
parameter_values = {
'project': project,
'location': location,
'model_name': model_name,
'time_column': time_column,
'time_series_identifier_column': time_series_identifier_column,
'target_column': target_column,
'data_source_csv_filenames': data_source_csv_filenames,
'data_source_bigquery_table_path': data_source_bigquery_table_path,
'bigquery_destination_uri': bigquery_destination_uri,
'machine_type': machine_type,
'max_num_workers': max_num_workers,
}
pipeline_definition_path = os.path.join(
_GCPC_FORECASTING_PATH, 'prophet_predict_pipeline.yaml'
)
return pipeline_definition_path, parameter_values
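# Hypothetical usage sketch (placeholders only). As noted in the docstring,
# the Prophet prediction server batches results by time series ID and this
# pipeline disaggregates them; submitting it follows the same pattern as above.
def _example_submit_prophet_prediction_pipeline() -> None:
  """Hypothetical example of launching the Prophet prediction pipeline."""
  from google.cloud import aiplatform  # Assumed to be available at call time.
  template_path, parameter_values = get_prophet_prediction_pipeline_and_parameters(
      project='my-project',  # Placeholder project ID.
      location='us-central1',
      model_name='projects/my-project/locations/us-central1/models/123',  # Placeholder.
      time_column='ds',
      time_series_identifier_column='store_id',
      target_column='sales',
      data_source_bigquery_table_path='bq://my-project.my_dataset.future_rows',
  )
  aiplatform.PipelineJob(
      display_name='prophet-predict',
      template_path=template_path,
      parameter_values=parameter_values,
      pipeline_root='gs://my-bucket/pipeline_root',  # Placeholder staging location.
  ).submit()  # Returns immediately; use run() to wait for completion.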
| 838 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/automl/forecasting/bqml_arima_predict_pipeline.yaml | # PIPELINE DEFINITION
# Name: automl-tabular-bqml-arima-prediction
# Description: Forecasts using a BQML ARIMA_PLUS model.
# Inputs:
# bigquery_destination_uri: str [Default: '']
# data_source_bigquery_table_path: str [Default: '']
# data_source_csv_filenames: str [Default: '']
# encryption_spec_key_name: str [Default: '']
# generate_explanation: bool [Default: False]
# location: str
# model_name: str
# project: str
components:
comp-bigquery-create-dataset:
executorLabel: exec-bigquery-create-dataset
inputDefinitions:
parameters:
dataset:
parameterType: STRING
exists_ok:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
location:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
comp-bigquery-create-dataset-2:
executorLabel: exec-bigquery-create-dataset-2
inputDefinitions:
parameters:
dataset:
parameterType: STRING
exists_ok:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
location:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
dataset_id:
parameterType: STRING
project_id:
parameterType: STRING
comp-bigquery-delete-dataset-with-prefix:
executorLabel: exec-bigquery-delete-dataset-with-prefix
inputDefinitions:
parameters:
dataset_prefix:
parameterType: STRING
delete_contents:
defaultValue: false
isOptional: true
parameterType: BOOLEAN
project:
parameterType: STRING
comp-bigquery-query-job:
executorLabel: exec-bigquery-query-job
inputDefinitions:
parameters:
encryption_spec_key_name:
defaultValue: ''
description: 'Describes the Cloud
KMS encryption key that will be used to protect destination
BigQuery table. The BigQuery Service Account associated with your
            project requires access to this encryption key. If
            encryption_spec_key_name is specified both here and in
            job_configuration_query, the value here will override the other
            one.'
isOptional: true
parameterType: STRING
job_configuration_query:
defaultValue: {}
description: 'A json formatted string
describing the rest of the job configuration. For more details, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery'
isOptional: true
parameterType: STRUCT
labels:
defaultValue: {}
description: 'The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can
            be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters
are allowed. Label values are optional. Label keys must start with a
letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
isOptional: true
parameterType: STRUCT
location:
defaultValue: us-central1
description: 'Location for creating the BigQuery job. If not
            set, defaults to the `US` multi-region. For more details, see
https://cloud.google.com/bigquery/docs/locations#specifying_your_location'
isOptional: true
parameterType: STRING
project:
defaultValue: '{{$.pipeline_google_cloud_project_id}}'
description: Project to run the BigQuery query job. Defaults to the project
in which the PipelineJob is run.
isOptional: true
parameterType: STRING
query:
defaultValue: ''
description: 'SQL query text to execute. Only standard SQL is
supported. If query are both specified in here and in
job_configuration_query, the value in here will override the other
one.'
isOptional: true
parameterType: STRING
query_parameters:
defaultValue: []
description: 'jobs.query parameters for
            standard SQL queries. If query_parameters is specified both here
            and in job_configuration_query, the value here will override the
            other one.'
isOptional: true
parameterType: LIST
outputDefinitions:
artifacts:
destination_table:
artifactType:
schemaTitle: google.BQTable
schemaVersion: 0.0.1
description: 'Describes the table where the query results should be stored.
This property must be set for large results that exceed the maximum
response size.
For queries that produce anonymous (cached) results, this field will
be populated by BigQuery.'
parameters:
gcp_resources:
description: 'Serialized gcp_resources proto tracking the BigQuery job.
For more details, see
https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.'
parameterType: STRING
comp-build-job-configuration-query:
executorLabel: exec-build-job-configuration-query
inputDefinitions:
parameters:
dataset_id:
defaultValue: ''
isOptional: true
parameterType: STRING
priority:
defaultValue: INTERACTIVE
isOptional: true
parameterType: STRING
project_id:
defaultValue: ''
isOptional: true
parameterType: STRING
table_id:
defaultValue: ''
isOptional: true
parameterType: STRING
write_disposition:
defaultValue: ''
isOptional: true
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRUCT
comp-exit-handler-1:
dag:
tasks:
bigquery-create-dataset:
cachingOptions: {}
componentRef:
name: comp-bigquery-create-dataset
dependentTasks:
- get-table-location
- validate-inputs
inputs:
parameters:
dataset:
runtimeValue:
constant: tmp_{{$.pipeline_job_uuid}}
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: create-tmp-dataset
bigquery-create-dataset-2:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-create-dataset-2
dependentTasks:
- get-table-location
- maybe-replace-with-default
- validate-inputs
inputs:
parameters:
dataset:
taskOutputParameter:
outputParameterKey: Output
producerTask: maybe-replace-with-default
exists_ok:
runtimeValue:
constant: true
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: create-prediction-dataset
bigquery-query-job:
cachingOptions:
enableCache: true
componentRef:
name: comp-bigquery-query-job
dependentTasks:
- build-job-configuration-query
- get-first-valid
- get-model-metadata
- get-table-location
inputs:
parameters:
encryption_spec_key_name:
componentInputParameter: pipelinechannel--encryption_spec_key_name
job_configuration_query:
taskOutputParameter:
outputParameterKey: Output
producerTask: build-job-configuration-query
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
pipelinechannel--get-first-valid-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-first-valid
pipelinechannel--get-model-metadata-forecast_horizon:
taskOutputParameter:
outputParameterKey: forecast_horizon
producerTask: get-model-metadata
pipelinechannel--get-model-metadata-target_column:
taskOutputParameter:
outputParameterKey: target_column
producerTask: get-model-metadata
pipelinechannel--get-model-metadata-time_column:
taskOutputParameter:
outputParameterKey: time_column
producerTask: get-model-metadata
pipelinechannel--get-model-metadata-time_series_identifier_column:
taskOutputParameter:
outputParameterKey: time_series_identifier_column
producerTask: get-model-metadata
pipelinechannel--model_name:
componentInputParameter: pipelinechannel--model_name
project:
componentInputParameter: pipelinechannel--project
query:
runtimeValue:
constant: "\n SELECT\n target.*,\n STRUCT(prediction.time_series_adjusted_data\
\ AS value)\n AS predicted_{{$.inputs.parameters['pipelinechannel--get-model-metadata-target_column']}},\n\
\ prediction.* EXCEPT (\n {{$.inputs.parameters['pipelinechannel--get-model-metadata-time_series_identifier_column']}},\n\
\ time_series_timestamp,\n time_series_adjusted_data\n\
\ ),\n FROM\n ML.EXPLAIN_FORECAST(\n \
\ MODEL `{{$.inputs.parameters['pipelinechannel--model_name']}}`,\n\
\ STRUCT({{$.inputs.parameters['pipelinechannel--get-model-metadata-forecast_horizon']}}\
\ AS horizon)) AS prediction\n RIGHT JOIN `{{$.inputs.parameters['pipelinechannel--get-first-valid-Output']}}`\
\ AS target\n ON\n CAST(target.{{$.inputs.parameters['pipelinechannel--get-model-metadata-time_series_identifier_column']}}\
\ AS STRING)\n = CAST(prediction.{{$.inputs.parameters['pipelinechannel--get-model-metadata-time_series_identifier_column']}}\
\ AS STRING)\n AND TIMESTAMP(target.{{$.inputs.parameters['pipelinechannel--get-model-metadata-time_column']}})\
\ = prediction.time_series_timestamp\n WHERE target.{{$.inputs.parameters['pipelinechannel--get-model-metadata-target_column']}}\
\ IS NULL\n "
taskInfo:
name: predictions-table
build-job-configuration-query:
cachingOptions:
enableCache: true
componentRef:
name: comp-build-job-configuration-query
dependentTasks:
- bigquery-create-dataset-2
inputs:
parameters:
dataset_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-2-dataset_id'']}}'
pipelinechannel--bigquery-create-dataset-2-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset-2
pipelinechannel--bigquery-create-dataset-2-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset-2
project_id:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-2-project_id'']}}'
table_id:
runtimeValue:
constant: predictions_{{$.pipeline_job_uuid}}
taskInfo:
name: build-job-configuration-query
get-first-valid:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-first-valid
dependentTasks:
- load-table-from-uri
inputs:
parameters:
pipelinechannel--data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
pipelinechannel--load-table-from-uri-Output:
taskOutputParameter:
outputParameterKey: Output
producerTask: load-table-from-uri
values:
runtimeValue:
constant: '["{{$.inputs.parameters[''pipelinechannel--data_source_bigquery_table_path'']}}",
"{{$.inputs.parameters[''pipelinechannel--load-table-from-uri-Output'']}}"]'
taskInfo:
name: get-first-valid
get-model-metadata:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-model-metadata
dependentTasks:
- get-table-location
- validate-inputs
inputs:
parameters:
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
model:
componentInputParameter: pipelinechannel--model_name
project:
componentInputParameter: pipelinechannel--project
taskInfo:
name: get-model-metadata
get-table-location:
cachingOptions:
enableCache: true
componentRef:
name: comp-get-table-location
inputs:
parameters:
default_location:
componentInputParameter: pipelinechannel--location
project:
componentInputParameter: pipelinechannel--project
table:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
taskInfo:
name: get-table-location
load-table-from-uri:
cachingOptions:
enableCache: true
componentRef:
name: comp-load-table-from-uri
dependentTasks:
- bigquery-create-dataset
- get-table-location
inputs:
parameters:
destination:
runtimeValue:
constant: '{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-project_id'']}}.{{$.inputs.parameters[''pipelinechannel--bigquery-create-dataset-dataset_id'']}}.csv_export'
location:
taskOutputParameter:
outputParameterKey: Output
producerTask: get-table-location
pipelinechannel--bigquery-create-dataset-dataset_id:
taskOutputParameter:
outputParameterKey: dataset_id
producerTask: bigquery-create-dataset
pipelinechannel--bigquery-create-dataset-project_id:
taskOutputParameter:
outputParameterKey: project_id
producerTask: bigquery-create-dataset
project:
componentInputParameter: pipelinechannel--project
source_format:
runtimeValue:
constant: CSV
source_uris:
componentInputParameter: pipelinechannel--data_source_csv_filenames
taskInfo:
name: load-table-from-uri
maybe-replace-with-default:
cachingOptions:
enableCache: true
componentRef:
name: comp-maybe-replace-with-default
inputs:
parameters:
default:
runtimeValue:
constant: prediction_{{$.pipeline_job_uuid}}
value:
componentInputParameter: pipelinechannel--bigquery_destination_uri
taskInfo:
name: maybe-replace-with-default
validate-inputs:
cachingOptions:
enableCache: true
componentRef:
name: comp-validate-inputs
inputs:
parameters:
bigquery_destination_uri:
componentInputParameter: pipelinechannel--bigquery_destination_uri
data_source_bigquery_table_path:
componentInputParameter: pipelinechannel--data_source_bigquery_table_path
data_source_csv_filenames:
componentInputParameter: pipelinechannel--data_source_csv_filenames
source_model_uri:
componentInputParameter: pipelinechannel--model_name
taskInfo:
name: validate-inputs
inputDefinitions:
parameters:
pipelinechannel--bigquery_destination_uri:
parameterType: STRING
pipelinechannel--data_source_bigquery_table_path:
parameterType: STRING
pipelinechannel--data_source_csv_filenames:
parameterType: STRING
pipelinechannel--encryption_spec_key_name:
parameterType: STRING
pipelinechannel--location:
parameterType: STRING
pipelinechannel--model_name:
parameterType: STRING
pipelinechannel--project:
parameterType: STRING
comp-get-first-valid:
executorLabel: exec-get-first-valid
inputDefinitions:
parameters:
values:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-get-model-metadata:
executorLabel: exec-get-model-metadata
inputDefinitions:
parameters:
location:
parameterType: STRING
model:
parameterType: STRING
project:
parameterType: STRING
outputDefinitions:
parameters:
forecast_horizon:
parameterType: NUMBER_INTEGER
target_column:
parameterType: STRING
time_column:
parameterType: STRING
time_series_identifier_column:
parameterType: STRING
comp-get-table-location:
executorLabel: exec-get-table-location
inputDefinitions:
parameters:
default_location:
defaultValue: ''
description: Location to return if no table was given.
isOptional: true
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
table:
description: The BigQuery table to get a location for.
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-load-table-from-uri:
executorLabel: exec-load-table-from-uri
inputDefinitions:
parameters:
destination:
description: Table into which data is to be loaded.
parameterType: STRING
location:
description: The GCP region.
parameterType: STRING
project:
description: The GCP project.
parameterType: STRING
source_format:
defaultValue: CSV
description: 'The file format for the files being imported. Only CSV is
supported.'
isOptional: true
parameterType: STRING
source_uris:
description: 'URIs of data files to be loaded; in format
gs://<bucket_name>/<object_name_or_glob>.'
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-maybe-replace-with-default:
executorLabel: exec-maybe-replace-with-default
inputDefinitions:
parameters:
default:
defaultValue: ''
isOptional: true
parameterType: STRING
value:
parameterType: STRING
outputDefinitions:
parameters:
Output:
parameterType: STRING
comp-validate-inputs:
executorLabel: exec-validate-inputs
inputDefinitions:
parameters:
bigquery_destination_uri:
isOptional: true
parameterType: STRING
data_granularity_unit:
isOptional: true
parameterType: STRING
data_source_bigquery_table_path:
isOptional: true
parameterType: STRING
data_source_csv_filenames:
isOptional: true
parameterType: STRING
optimization_objective:
isOptional: true
parameterType: STRING
predefined_split_key:
isOptional: true
parameterType: STRING
source_model_uri:
isOptional: true
parameterType: STRING
target_column:
isOptional: true
parameterType: STRING
test_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
time_column:
isOptional: true
parameterType: STRING
time_series_identifier_column:
isOptional: true
parameterType: STRING
timestamp_split_key:
isOptional: true
parameterType: STRING
training_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
validation_fraction:
isOptional: true
parameterType: NUMBER_DOUBLE
window_column:
isOptional: true
parameterType: STRING
window_max_count:
isOptional: true
parameterType: NUMBER_INTEGER
window_stride_length:
isOptional: true
parameterType: NUMBER_INTEGER
deploymentSpec:
executors:
exec-bigquery-create-dataset:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_create_dataset
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_create_dataset(\n project: str,\n location: str,\n\
\ dataset: str,\n exists_ok: bool = False,\n) -> NamedTuple('Outputs',\
\ [('project_id', str), ('dataset_id', str)]):\n \"\"\"Creates a BigQuery\
\ dataset.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import collections\n\n from google.cloud import bigquery\n # pylint:\
\ enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n ref\
\ = client.create_dataset(dataset=dataset, exists_ok=exists_ok)\n return\
\ collections.namedtuple('Outputs', ['project_id', 'dataset_id'])(\n \
\ ref.project, ref.dataset_id)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-create-dataset-2:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_create_dataset
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_create_dataset(\n project: str,\n location: str,\n\
\ dataset: str,\n exists_ok: bool = False,\n) -> NamedTuple('Outputs',\
\ [('project_id', str), ('dataset_id', str)]):\n \"\"\"Creates a BigQuery\
\ dataset.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import collections\n\n from google.cloud import bigquery\n # pylint:\
\ enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n ref\
\ = client.create_dataset(dataset=dataset, exists_ok=exists_ok)\n return\
\ collections.namedtuple('Outputs', ['project_id', 'dataset_id'])(\n \
\ ref.project, ref.dataset_id)\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-delete-dataset-with-prefix:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- bigquery_delete_dataset_with_prefix
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef bigquery_delete_dataset_with_prefix(\n project: str,\n \
\ dataset_prefix: str,\n delete_contents: bool = False,\n) -> None:\n\
\ \"\"\"Deletes all BigQuery datasets matching the given prefix.\"\"\"\n\
\ # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project)\n for dataset in client.list_datasets(project=project):\n\
\ if dataset.dataset_id.startswith(dataset_prefix):\n client.delete_dataset(\n\
\ dataset=dataset.dataset_id,\n delete_contents=delete_contents)\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-bigquery-query-job:
container:
args:
- --type
- BigqueryQueryJob
- --project
- '{{$.inputs.parameters[''project'']}}'
- --location
- '{{$.inputs.parameters[''location'']}}'
- --payload
- '{"Concat": ["{", "\"configuration\": {", "\"query\": ", "{{$.inputs.parameters[''job_configuration_query'']}}",
", \"labels\": ", "{{$.inputs.parameters[''labels'']}}", "}", "}"]}'
- --job_configuration_query_override
- '{"Concat": ["{", "\"query\": \"", "{{$.inputs.parameters[''query'']}}",
"\"", ", \"query_parameters\": ", "{{$.inputs.parameters[''query_parameters'']}}",
", \"destination_encryption_configuration\": {", "\"kmsKeyName\": \"", "{{$.inputs.parameters[''encryption_spec_key_name'']}}",
"\"}", "}"]}'
- --gcp_resources
- '{{$.outputs.parameters[''gcp_resources''].output_file}}'
- --executor_input
- '{{$}}'
command:
- python3
- -u
- -m
- google_cloud_pipeline_components.container.v1.bigquery.query_job.launcher
image: gcr.io/ml-pipeline/google-cloud-pipeline-components:2.3.1
exec-build-job-configuration-query:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- build_job_configuration_query
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef build_job_configuration_query(\n project_id: str = '',\n \
\ dataset_id: str = '',\n table_id: str = '',\n write_disposition:\
\ str = '',\n priority: str = 'INTERACTIVE',\n) -> dict: # pylint: disable=g-bare-generic\n\
\ \"\"\"Creates a JobConfigurationQuery object.\"\"\"\n config = {\n \
\ 'priority': priority,\n }\n if all([project_id, dataset_id, table_id]):\n\
\ config['destinationTable'] = {\n 'projectId': project_id,\n\
\ 'datasetId': dataset_id,\n 'tableId': table_id,\n }\n\
\ if write_disposition:\n config['write_disposition'] = write_disposition\n\
\ return config\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-first-valid:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_first_valid
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_first_valid(values: str) -> str:\n \"\"\"Returns the first\
\ truthy value from the given serialized JSON list.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import json\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n for value in json.loads(values):\n if value:\n return value\n\
\ raise ValueError('No valid values.')\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-model-metadata:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_model_metadata
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_model_metadata(\n project: str,\n location: str,\n\
\ model: str,\n) -> NamedTuple(\n 'Outputs',\n [\n ('time_column',\
\ str),\n ('time_series_identifier_column', str),\n ('target_column',\
\ str),\n ('forecast_horizon', int),\n ],\n):\n \"\"\"Retrieves\
\ training options for a BQML model.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ import collections\n\n from google.cloud import bigquery\n # pylint:\
\ enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n client = bigquery.Client(project=project, location=location)\n options\
\ = client.get_model(model).training_runs[0].training_options\n return\
\ collections.namedtuple(\n 'Outputs', [\n 'time_column',\n\
\ 'time_series_identifier_column',\n 'target_column',\n\
\ 'forecast_horizon',\n ],\n )(\n options.time_series_timestamp_column,\n\
\ options.time_series_id_column,\n options.time_series_data_column,\n\
\ options.horizon,\n )\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-get-table-location:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- get_table_location
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef get_table_location(\n project: str,\n table: Optional[str],\n\
\ default_location: str = '',\n) -> str:\n \"\"\"Returns the region\
\ the given table belongs to.\n\n Args:\n project: The GCP project.\n\
\ table: The BigQuery table to get a location for.\n default_location:\
\ Location to return if no table was given.\n\n Returns:\n A GCP region\
\ or multi-region.\n \"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n if not table:\n return default_location\n\n client = bigquery.Client(project=project)\n\
\ if table.startswith('bq://'):\n table = table[len('bq://'):]\n elif\
\ table.startswith('bigquery://'):\n table = table[len('bigquery://'):]\n\
\ return client.get_table(table).location\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-load-table-from-uri:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- load_table_from_uri
command:
- sh
- -c
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\
\ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\
\ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.0-rc.2'\
\ && \"$0\" \"$@\"\n"
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef load_table_from_uri(\n project: str,\n location: str,\n\
\ source_uris: str,\n destination: str,\n source_format: str =\
\ 'CSV',\n) -> str:\n \"\"\"Creates a table from a list of URIs.\n\n Args:\n\
\ project: The GCP project.\n location: The GCP region.\n source_uris:\
\ URIs of data files to be loaded; in format\n gs://<bucket_name>/<object_name_or_glob>.\n\
\ destination: Table into which data is to be loaded.\n source_format:\
\ The file format for the files being imported. Only CSV is\n supported.\n\
\n Returns:\n The destination table containing imported data.\n \"\"\
\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\ from google.cloud import bigquery\n # pylint: enable=g-import-not-at-top,import-outside-toplevel,redefined-outer-name,reimported\n\
\n if not source_uris:\n return ''\n\n csv_list = [filename.strip()\
\ for filename in source_uris.split(',')]\n client = bigquery.Client(project=project,\
\ location=location)\n job_config = bigquery.LoadJobConfig(\n autodetect=True,\
\ source_format=source_format)\n client.load_table_from_uri(\n source_uris=csv_list,\n\
\ destination=destination,\n project=project,\n location=location,\n\
\ job_config=job_config).result()\n return destination\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-maybe-replace-with-default:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- maybe_replace_with_default
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef maybe_replace_with_default(value: str, default: str = '') ->\
\ str:\n \"\"\"Replaces string with another value if it is a dash.\"\"\"\
\n return default if not value else value\n\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
exec-validate-inputs:
container:
args:
- --executor_input
- '{{$}}'
- --function_to_execute
- validate_inputs
command:
- sh
- -ec
- 'program_path=$(mktemp -d)
printf "%s" "$0" > "$program_path/ephemeral_component.py"
python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@"
'
- "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\
\ *\n\ndef validate_inputs(\n time_column: Optional[str] = None,\n \
\ time_series_identifier_column: Optional[str] = None,\n target_column:\
\ Optional[str] = None,\n data_source_bigquery_table_path: Optional[str]\
\ = None,\n training_fraction: Optional[float] = None,\n validation_fraction:\
\ Optional[float] = None,\n test_fraction: Optional[float] = None,\n\
\ predefined_split_key: Optional[str] = None,\n timestamp_split_key:\
\ Optional[str] = None,\n data_source_csv_filenames: Optional[str] =\
\ None,\n source_model_uri: Optional[str] = None,\n bigquery_destination_uri:\
\ Optional[str] = None,\n window_column: Optional[str] = None,\n window_stride_length:\
\ Optional[int] = None,\n window_max_count: Optional[int] = None,\n \
\ optimization_objective: Optional[str] = None,\n data_granularity_unit:\
\ Optional[str] = None,\n) -> None:\n \"\"\"Checks training pipeline input\
\ parameters are valid.\"\"\"\n # pylint: disable=g-import-not-at-top,import-outside-toplevel\n\
\ import re\n # pylint: enable=g-import-not-at-top,import-outside-toplevel\n\
\n project_pattern = r'([a-z0-9.-]+:)?[a-z][a-z0-9-_]{4,28}[a-z0-9]'\n\
\ dataset_pattern = r'[a-zA-Z0-9_]+'\n table_pattern = r'[^\\.\\:`]+'\n\
\ dataset_uri_pattern = re.compile(\n f'(bq://)?{project_pattern}[.:]{dataset_pattern}')\n\
\ table_uri_pattern = re.compile(\n f'(bq://)?{project_pattern}[.:]{dataset_pattern}[.:]{table_pattern}')\n\
\n # Validate BigQuery column and dataset names.\n bigquery_column_parameters\
\ = [\n time_column,\n time_series_identifier_column,\n target_column,\n\
\ ]\n column_pattern = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]{1,300}')\n \
\ for column in bigquery_column_parameters:\n if column and not column_pattern.fullmatch(column):\n\
\ raise ValueError(f'Invalid column name: {column}.')\n if (bigquery_destination_uri\
\ and\n not dataset_uri_pattern.fullmatch(bigquery_destination_uri)):\n\
\ raise ValueError(\n f'Invalid BigQuery dataset URI: {bigquery_destination_uri}.')\n\
\ if (source_model_uri and not table_uri_pattern.fullmatch(source_model_uri)):\n\
\ raise ValueError(f'Invalid BigQuery table URI: {source_model_uri}.')\n\
\n # Validate data source.\n data_source_count = sum([bool(source) for\
\ source in [\n data_source_bigquery_table_path, data_source_csv_filenames]])\n\
\ if data_source_count > 1:\n raise ValueError(f'Expected 1 data source,\
\ found {data_source_count}.')\n if (data_source_bigquery_table_path\n\
\ and not table_uri_pattern.fullmatch(data_source_bigquery_table_path)):\n\
\ raise ValueError(\n f'Invalid BigQuery table URI: {data_source_bigquery_table_path}.')\n\
\ gcs_path_pattern = re.compile(r'gs:\\/\\/(.+)\\/([^\\/]+)')\n if data_source_csv_filenames:\n\
\ csv_list = [filename.strip()\n for filename in data_source_csv_filenames.split(',')]\n\
\ for gcs_path in csv_list:\n if not gcs_path_pattern.fullmatch(gcs_path):\n\
\ raise ValueError(f'Invalid path to CSV stored in GCS: {gcs_path}.')\n\
\n # Validate split spec.\n fraction_splits = [\n training_fraction,\n\
\ validation_fraction,\n test_fraction,\n ]\n fraction_splits\
\ = [None if fraction == -1 else fraction\n for fraction\
\ in fraction_splits]\n split_count = sum([\n bool(source)\n \
\ for source in [predefined_split_key,\n any(fraction_splits)]\n\
\ ])\n if split_count > 1:\n raise ValueError(f'Expected 1 split type,\
\ found {split_count}.')\n if (predefined_split_key and\n not column_pattern.fullmatch(predefined_split_key)):\n\
\ raise ValueError(f'Invalid column name: {predefined_split_key}.')\n\
\ if any(fraction_splits):\n if not all(fraction_splits):\n raise\
\ ValueError(\n f'All fractions must be non-zero. Got: {fraction_splits}.')\n\
\ if sum(fraction_splits) != 1:\n raise ValueError(\n f'Fraction\
\ splits must sum to 1. Got: {sum(fraction_splits)}.')\n if (timestamp_split_key\
\ and\n not column_pattern.fullmatch(timestamp_split_key)):\n raise\
\ ValueError(f'Invalid column name: {timestamp_split_key}.')\n if timestamp_split_key\
\ and not all(fraction_splits):\n raise ValueError('All fractions must\
\ be non-zero for timestamp split.')\n\n # Validate window config.\n if\
\ window_stride_length == -1:\n window_stride_length = None\n if window_max_count\
\ == -1:\n window_max_count = None\n window_configs = [window_column,\
\ window_stride_length, window_max_count]\n window_config_count = sum([bool(config)\
\ for config in window_configs])\n if window_config_count > 1:\n raise\
\ ValueError(f'Expected 1 window config, found {window_config_count}.')\n\
\ if window_column and not column_pattern.fullmatch(window_column):\n \
\ raise ValueError(f'Invalid column name: {window_column}.')\n if window_stride_length\
\ and (window_stride_length < 1 or\n window_stride_length\
\ > 1000):\n raise ValueError('Stride must be between 1 and 1000. Got:\
\ '\n f'{window_stride_length}.')\n if window_max_count\
\ and (window_max_count < 1000 or\n window_max_count\
\ > int(1e8)):\n raise ValueError('Max count must be between 1000 and\
\ 100000000. Got: '\n f'{window_max_count}.')\n\n #\
\ Validate eval metric.\n valid_optimization_objectives = ['rmse', 'mae',\
\ 'rmsle']\n if optimization_objective:\n if optimization_objective\
\ not in valid_optimization_objectives:\n raise ValueError(\n \
\ 'Optimization objective should be one of the following: '\n \
\ f'{valid_optimization_objectives}, got: {optimization_objective}.')\n\
\n # Validate data granularity unit.\n valid_data_granularity_units =\
\ [\n 'minute', 'hour', 'day', 'week', 'month', 'year']\n if data_granularity_unit:\n\
\ if data_granularity_unit not in valid_data_granularity_units:\n \
\ raise ValueError(\n 'Granularity unit should be one of the\
\ following: '\n f'{valid_data_granularity_units}, got: {data_granularity_unit}.')\n\
\n"
image: us-docker.pkg.dev/vertex-ai/automl-tabular/kfp-v2-base:20240808_0625
pipelineInfo:
description: Forecasts using a BQML ARIMA_PLUS model.
name: automl-tabular-bqml-arima-prediction
root:
dag:
tasks:
bigquery-delete-dataset-with-prefix:
cachingOptions: {}
componentRef:
name: comp-bigquery-delete-dataset-with-prefix
dependentTasks:
- exit-handler-1
inputs:
parameters:
dataset_prefix:
runtimeValue:
constant: tmp_{{$.pipeline_job_uuid}}
delete_contents:
runtimeValue:
constant: true
project:
componentInputParameter: project
taskInfo:
name: delete-tmp-dataset
triggerPolicy:
strategy: ALL_UPSTREAM_TASKS_COMPLETED
exit-handler-1:
componentRef:
name: comp-exit-handler-1
inputs:
parameters:
pipelinechannel--bigquery_destination_uri:
componentInputParameter: bigquery_destination_uri
pipelinechannel--data_source_bigquery_table_path:
componentInputParameter: data_source_bigquery_table_path
pipelinechannel--data_source_csv_filenames:
componentInputParameter: data_source_csv_filenames
pipelinechannel--encryption_spec_key_name:
componentInputParameter: encryption_spec_key_name
pipelinechannel--location:
componentInputParameter: location
pipelinechannel--model_name:
componentInputParameter: model_name
pipelinechannel--project:
componentInputParameter: project
taskInfo:
name: exit-handler-1
inputDefinitions:
parameters:
bigquery_destination_uri:
defaultValue: ''
description: 'URI of the desired destination dataset. If not
specified, a resource will be created under a new dataset in the project.'
isOptional: true
parameterType: STRING
data_source_bigquery_table_path:
defaultValue: ''
description: 'The BigQuery table path of format
bq://bq_project.bq_dataset.bq_table'
isOptional: true
parameterType: STRING
data_source_csv_filenames:
defaultValue: ''
description: 'A string that represents a list of comma
separated CSV filenames.'
isOptional: true
parameterType: STRING
encryption_spec_key_name:
defaultValue: ''
description: The KMS key name.
isOptional: true
parameterType: STRING
generate_explanation:
defaultValue: false
description: 'Generate explanation along with the batch prediction
results. This will cause the batch prediction output to include
explanations.'
isOptional: true
parameterType: BOOLEAN
location:
description: The GCP region for Vertex AI.
parameterType: STRING
model_name:
description: ARIMA_PLUS BQML model URI.
parameterType: STRING
project:
description: The GCP project that runs the pipeline components.
parameterType: STRING
schemaVersion: 2.1.0
sdkVersion: kfp-2.0.0-rc.2
| 839 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/classification_component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model_evaluation import version
from google_cloud_pipeline_components.types.artifact_types import BQTable
from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import container_component
@container_component
def model_evaluation_classification(
gcp_resources: dsl.OutputPath(str),
evaluation_metrics: dsl.Output[ClassificationMetrics],
target_field_name: str,
model: dsl.Input[VertexModel] = None,
location: str = 'us-central1',
predictions_format: str = 'jsonl',
predictions_gcs_source: dsl.Input[dsl.Artifact] = None,
predictions_bigquery_source: dsl.Input[BQTable] = None,
ground_truth_format: str = 'jsonl',
ground_truth_gcs_source: List[str] = [],
ground_truth_bigquery_source: str = '',
classification_type: str = 'multiclass',
class_labels: List[str] = [],
prediction_score_column: str = '',
prediction_label_column: str = '',
slicing_specs: List[Any] = [],
positive_classes: List[str] = [],
dataflow_service_account: str = '',
dataflow_disk_size_gb: int = 50,
dataflow_machine_type: str = 'n1-standard-4',
dataflow_workers_num: int = 1,
dataflow_max_workers_num: int = 5,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Computes a `google.ClassificationMetrics` Artifact, containing evaluation
metrics given a model's prediction results.
Creates a Dataflow job with Apache Beam and TFMA to compute evaluation
metrics.
Supports multiclass classification evaluation for tabular, image, video, and
text data.
Args:
location: Location for running the evaluation.
predictions_format: The file format for the batch prediction results. `jsonl`, `csv`, and `bigquery` are the allowed formats, from Vertex Batch Prediction.
predictions_gcs_source: An artifact with its URI pointing toward a GCS directory with prediction or explanation files to be used for this evaluation. For prediction results, the files should be named "prediction.results-*" or "predictions_". For explanation results, the files should be named "explanation.results-*".
predictions_bigquery_source: BigQuery table with prediction or explanation data to be used for this evaluation. For prediction results, the table column should be named "predicted_*".
    ground_truth_format: Required for custom tabular and non-tabular data. The file format for the ground truth files. `jsonl`, `csv`, and `bigquery` are the allowed formats.
    ground_truth_gcs_source: Required for custom tabular and non-tabular data. The GCS URIs representing where the ground truth is located. Used to provide ground truth for each prediction instance when it is not part of the batch prediction job's prediction instances.
    ground_truth_bigquery_source: Required for custom tabular data. The BigQuery table URI representing where the ground truth is located. Used to provide ground truth for each prediction instance when it is not part of the batch prediction job's prediction instances.
classification_type: The type of classification problem, either `multiclass` or `multilabel`.
    class_labels: The list of class names for the target_field_name, in the same order they appear in the batch prediction job's predictions output file. For instance, if the values of target_field_name could be either `1` or `0`, and the predictions output contains ["1", "0"] for the prediction_label_column, then the class_labels input will be ["1", "0"]. If not set, defaults to the classes found in the prediction_label_column in the batch prediction job's predictions file.
target_field_name: The full name path of the features target field in the predictions file. Formatted to be able to find nested columns, delimited by `.`. Alternatively referred to as the ground truth (or ground_truth_column) field.
model: The Vertex model used for evaluation. Must be located in the same region as the location argument. It is used to set the default configurations for AutoML and custom-trained models.
prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
prediction_label_column: The column name of the field containing classes the model is scoring. Formatted to be able to find nested columns, delimited by `.`.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See sample code in https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component Below is an example of how to format this input.
1: First, create a SlicingSpec. `from google.cloud.aiplatform_v1.types.ModelEvaluationSlice.Slice import SliceSpec` `from google.cloud.aiplatform_v1.types.ModelEvaluationSlice.Slice.SliceSpec import SliceConfig` `slicing_spec = SliceSpec(configs={ 'feature_a': SliceConfig(SliceSpec.Value(string_value='label_a'))})`
2: Create a list to store the slicing specs into. `slicing_specs = []`
3: Format each SlicingSpec into a JSON or Dict. `slicing_spec_json = json_format.MessageToJson(slicing_spec)` or `slicing_spec_dict = json_format.MessageToDict(slicing_spec)`
4: Combine each slicing_spec JSON into a list. `slicing_specs.append(slicing_spec_json)`
      5: Finally, pass slicing_specs as a parameter to this component. `ModelEvaluationClassificationOp(slicing_specs=slicing_specs)` For more details on configuring slices, see https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice
positive_classes: The list of class names to create binary classification metrics based on one-vs-rest for each value of positive_classes provided.
dataflow_service_account: Service account to run the Dataflow job. If not set, Dataflow will use the default worker service account. For more details, see https://cloud.google.com/dataflow/docs/concepts/security-and-permissions#default_worker_service_account
dataflow_disk_size_gb: The disk size (in GB) of the machine executing the evaluation run.
dataflow_machine_type: The machine type executing the evaluation run.
dataflow_workers_num: The number of workers executing the evaluation run.
dataflow_max_workers_num: The max number of workers executing the evaluation run.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
force_runner_mode: Flag to choose Beam runner. Valid options are `DirectRunner` and `Dataflow`.
project: Project to run evaluation container. Defaults to the project in which the PipelineJob is run.
Returns:
evaluation_metrics: `google.ClassificationMetrics` representing the classification evaluation metrics in GCS.
gcp_resources: Serialized gcp_resources proto tracking the Dataflow job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return dsl.ContainerSpec(
image=version.EVAL_IMAGE_TAG,
command=[
'python3',
'/main.py',
],
args=[
'--setup_file',
'/setup.py',
'--json_mode',
'true',
'--project_id',
project,
'--location',
location,
'--problem_type',
'classification',
'--target_field_name',
dsl.ConcatPlaceholder(['instance.', target_field_name]),
'--batch_prediction_format',
predictions_format,
dsl.IfPresentPlaceholder(
input_name='predictions_gcs_source',
then=[
'--batch_prediction_gcs_source',
predictions_gcs_source.uri,
],
),
dsl.IfPresentPlaceholder(
input_name='predictions_bigquery_source',
then=[
'--batch_prediction_bigquery_source',
dsl.ConcatPlaceholder([
'bq://',
predictions_bigquery_source.metadata['projectId'],
'.',
predictions_bigquery_source.metadata['datasetId'],
'.',
predictions_bigquery_source.metadata['tableId'],
]),
],
),
dsl.IfPresentPlaceholder(
input_name='model',
then=[
'--model_name',
model.metadata['resourceName'],
],
),
'--ground_truth_format',
ground_truth_format,
'--ground_truth_gcs_source',
ground_truth_gcs_source,
'--ground_truth_bigquery_source',
ground_truth_bigquery_source,
'--root_dir',
f'{dsl.PIPELINE_ROOT_PLACEHOLDER}/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
'--classification_type',
classification_type,
'--class_labels',
class_labels,
'--prediction_score_column',
prediction_score_column,
'--prediction_label_column',
prediction_label_column,
dsl.IfPresentPlaceholder(
input_name='slicing_specs',
then=[
'--slicing_specs',
slicing_specs,
],
),
'--positive_classes',
positive_classes,
'--dataflow_job_prefix',
f'evaluation-classification-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
'--dataflow_service_account',
dataflow_service_account,
'--dataflow_disk_size',
dataflow_disk_size_gb,
'--dataflow_machine_type',
dataflow_machine_type,
'--dataflow_workers_num',
dataflow_workers_num,
'--dataflow_max_workers_num',
dataflow_max_workers_num,
'--dataflow_subnetwork',
dataflow_subnetwork,
'--dataflow_use_public_ips',
dataflow_use_public_ips,
'--kms_key_name',
encryption_spec_key_name,
'--force_runner_mode',
force_runner_mode,
'--output_metrics_gcs_path',
evaluation_metrics.path,
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
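# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the shipped component). It follows the
# `slicing_specs` recipe from the docstring above and wires this component into
# a minimal pipeline. The feature name ('feature_a'), slice value ('label_a'),
# target field ('label'), and GCS URIs are hypothetical placeholders, and
# `._pb` is used so `json_format.MessageToJson` receives a raw protobuf message
# from the proto-plus wrapper.
if __name__ == '__main__':
  from google.cloud.aiplatform_v1.types import ModelEvaluationSlice
  from google.protobuf import json_format
  from kfp import compiler

  # Build one slicing spec that slices metrics on feature_a == 'label_a'.
  SliceSpec = ModelEvaluationSlice.Slice.SliceSpec
  slicing_spec = SliceSpec(
      configs={
          'feature_a': SliceSpec.SliceConfig(
              value=SliceSpec.Value(string_value='label_a')
          )
      }
  )
  slicing_specs_arg = [json_format.MessageToJson(slicing_spec._pb)]

  @dsl.pipeline(name='example-classification-eval')
  def example_eval_pipeline(
      predictions_dir: str = 'gs://my-bucket/predictions',
  ):
    # Import the batch prediction output directory as an artifact and pass it,
    # together with the slicing specs, to the evaluation component.
    predictions = dsl.importer(
        artifact_uri=predictions_dir, artifact_class=dsl.Artifact
    )
    model_evaluation_classification(
        target_field_name='label',
        ground_truth_gcs_source=['gs://my-bucket/ground_truth/*.jsonl'],
        predictions_gcs_source=predictions.outputs['artifact'],
        slicing_specs=slicing_specs_arg,
    )

  compiler.Compiler().compile(
      example_eval_pipeline, 'example_classification_eval.yaml'
  )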
| 840 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_classification_pipeline.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vertex Gen AI Evaluation for text classification task."""
from typing import Dict, List, NamedTuple
from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationClassificationPredictionsPostprocessorOp
from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationPreprocessorOp
from google_cloud_pipeline_components._implementation.model_evaluation import ModelNamePreprocessorOp
from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
from kfp import dsl
# pylint: disable=unused-argument, unexpected-keyword-arg
_PIPELINE_NAME = 'evaluation-llm-classification-pipeline'
@dsl.pipeline(name=_PIPELINE_NAME)
def evaluation_llm_classification_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
target_field_name: str,
batch_predict_gcs_source_uris: List[str],
batch_predict_gcs_destination_output_uri: str,
model_name: str = 'publishers/google/models/text-bison@002',
evaluation_task: str = 'text-classification',
evaluation_class_labels: List[str] = [],
input_field_name: str = 'input_text',
batch_predict_instances_format: str = 'jsonl',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_model_parameters: Dict[str, str] = {},
machine_type: str = 'e2-highmem-16',
service_account: str = '',
network: str = '',
dataflow_machine_type: str = 'n1-standard-4',
dataflow_disk_size_gb: int = 50,
dataflow_max_num_workers: int = 5,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-llm-classification-pipeline-{{$.pipeline_job_uuid}}',
) -> NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The LLM Text Classification Evaluation pipeline.
Args:
project: Required. The GCP project that runs the pipeline components.
location: Required. The GCP region that runs the pipeline components.
target_field_name: Required. The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_gcs_source_uris: Required. Google Cloud Storage URI(-s) to your instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: Required. The Google Cloud Storage location of the directory where the output is to be written to.
model_name: The Model name used to run evaluation. Must be a publisher Model or a managed Model sharing the same ancestor location. Starting this job has no impact on any existing deployments of the Model and their resources.
    evaluation_task: The task that the large language model will be evaluated on. The evaluation component computes a set of metrics relevant to that specific task. The only currently supported classification task is `text-classification`.
evaluation_class_labels: The JSON array of class names for the target_field, in the same order they appear in the batch predictions input file.
input_field_name: The field name of the input eval dataset instances that contains the input prompts to the LLM.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_model_parameters: A map of parameters that govern the predictions. Some acceptable parameters include: maxOutputTokens, topK, topP, and temperature.
machine_type: The machine type of the custom jobs in this pipeline. If not set, defaulted to `e2-highmem-16`. More details: https://cloud.google.com/compute/docs/machine-resource
    service_account: Sets the default service account for the workload run-as account. The service account running the pipeline (https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account) submitting jobs must have act-as permission on this run-as account. If unspecified, the Vertex AI Custom Code Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used.
network: The full name of the Compute Engine network to which the job should be peered. For example, `projects/12345/global/networks/myVPC`. Format is of the form `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is a network name, as in `myVPC`. To specify this field, you must have already configured VPC Network Peering for Vertex AI (https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If left unspecified, the job is not peered with any network.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_disk_size_gb: The disk size (in GB) of the machine executing the evaluation run. If not set, defaulted to `50`.
dataflow_max_num_workers: The max number of workers executing the evaluation run. If not set, defaulted to `5`.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
Returns:
evaluation_metrics: ClassificationMetrics Artifact for LLM Text Classification.
    evaluation_resource_name: If run on a user's managed VertexModel, the imported evaluation resource name. Empty if run on a publisher model.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
)
preprocessed_model_name = ModelNamePreprocessorOp(
project=project,
location=location,
model_name=model_name,
service_account=service_account,
)
get_vertex_model_task = dsl.importer(
artifact_uri=(
f'https://{location}-aiplatform.googleapis.com/v1/{preprocessed_model_name.outputs["processed_model_name"]}'
),
artifact_class=VertexModel,
metadata={
'resourceName': preprocessed_model_name.outputs[
'processed_model_name'
]
},
)
get_vertex_model_task.set_display_name('get-vertex-model')
eval_dataset_preprocessor_task = LLMEvaluationPreprocessorOp(
project=project,
location=location,
gcs_source_uris=batch_predict_gcs_source_uris,
input_field_name=input_field_name,
machine_type=machine_type,
service_account=service_account,
network=network,
encryption_spec_key_name=encryption_spec_key_name,
)
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_vertex_model_task.outputs['artifact'],
job_display_name='evaluation-batch-predict-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
gcs_source_uris=eval_dataset_preprocessor_task.outputs[
'preprocessed_gcs_source_uris'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
model_parameters=batch_predict_model_parameters,
encryption_spec_key_name=encryption_spec_key_name,
)
postprocessor_task = LLMEvaluationClassificationPredictionsPostprocessorOp(
project=project,
batch_prediction_results=batch_predict_task.outputs[
'gcs_output_directory'
],
class_labels=evaluation_class_labels,
location=location,
machine_type=machine_type,
network=network,
service_account=service_account,
encryption_spec_key_name=encryption_spec_key_name,
)
eval_task = ModelEvaluationClassificationOp(
project=project,
location=location,
class_labels=postprocessor_task.outputs['postprocessed_class_labels'],
target_field_name=target_field_name,
predictions_gcs_source=postprocessor_task.outputs[
'postprocessed_predictions_gcs_source'
],
prediction_label_column='prediction.classes',
prediction_score_column='prediction.scores',
predictions_format=batch_predict_predictions_format,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
)
get_vertex_eval_model_task = dsl.importer(
artifact_uri=(
f'https://{location}-aiplatform.googleapis.com/v1/{model_name}'
),
artifact_class=VertexModel,
metadata={'resourceName': model_name},
)
get_vertex_eval_model_task.set_display_name('get-vertex-eval-model')
import_evaluation_task = ModelImportEvaluationOp(
classification_metrics=eval_task.outputs['evaluation_metrics'],
model=get_vertex_eval_model_task.outputs['artifact'],
dataset_type=batch_predict_instances_format,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
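# ---------------------------------------------------------------------------
# Illustrative submission sketch (not part of the shipped pipeline). It
# compiles this pipeline with the KFP SDK and submits it as a Vertex AI
# PipelineJob. The project, region, bucket, dataset URI, and class labels are
# hypothetical placeholders; any parameter not listed keeps its default.
if __name__ == '__main__':
  from google.cloud import aiplatform
  from kfp import compiler

  package_path = 'evaluation_llm_classification_pipeline.yaml'
  compiler.Compiler().compile(
      pipeline_func=evaluation_llm_classification_pipeline,
      package_path=package_path,
  )

  aiplatform.init(project='my-project', location='us-central1')
  job = aiplatform.PipelineJob(
      display_name='evaluation-llm-classification',
      template_path=package_path,
      pipeline_root='gs://my-bucket/pipeline-root',
      parameter_values={
          'project': 'my-project',
          'location': 'us-central1',
          'target_field_name': 'ground_truth',
          'batch_predict_gcs_source_uris': ['gs://my-bucket/eval_dataset.jsonl'],
          'batch_predict_gcs_destination_output_uri': (
              'gs://my-bucket/batch_predict_output'
          ),
          'evaluation_class_labels': ['positive', 'negative'],
      },
  )
  job.submit()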
| 841 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/evaluated_annotation_pipeline.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model import GetVertexModelOp
from google_cloud_pipeline_components._implementation.model_evaluation import EvaluatedAnnotationOp
from google_cloud_pipeline_components._implementation.model_evaluation import EvaluationDatasetPreprocessorOp as DatasetPreprocessorOp
from google_cloud_pipeline_components._implementation.model_evaluation import ModelImportEvaluatedAnnotationOp
from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
from google_cloud_pipeline_components.v1.dataset import GetVertexDatasetOp
from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
from kfp import dsl
@dsl.pipeline(name='automl-vision-evaluated-annotation-pipeline')
def evaluated_annotation_pipeline( # pylint: disable=dangerous-default-value
location: str,
model_name: str,
batch_predict_gcs_destination_output_uri: str,
test_dataset_resource_name: str = '',
test_dataset_annotation_set_name: str = '',
test_dataset_storage_source_uris: List[str] = [],
batch_predict_instances_format: str = 'jsonl',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_machine_type: str = 'n1-standard-32',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
dataflow_machine_type: str = 'n1-standard-8',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-vision-evaluated-annotation-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""The evaluation evaluated annotation pipeline.
Args:
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction, in the format of `projects/{project}/locations/{location}/models/{model}` or `projects/{project}/locations/{location}/models/{model}@{model_version_id or model_version_alias}`
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
test_dataset_resource_name: A Vertex dataset resource name of the test dataset. If `test_dataset_storage_source_uris` is also provided, this argument will override the GCS source.
    test_dataset_annotation_set_name: The annotation_set name containing the ground truth of the test dataset used for evaluation.
test_dataset_storage_source_uris: Google Cloud Storage URI(-s) to unmanaged test datasets.`jsonl` is currently the only allowed format. If `test_dataset` is also provided, this field will be overridden by the provided Vertex Dataset.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: The disk size (in GB) of the machine executing the evaluation run.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
    force_runner_mode: Indicates the runner mode to force. Valid options are `Dataflow` and `DirectRunner`.
project: The GCP project that runs the pipeline components. Defaults to the project in which the PipelineJob is run.
"""
  # fmt: on
get_test_dataset_task = GetVertexDatasetOp(
dataset_resource_name=test_dataset_resource_name
)
dataset_preprocessor_task = DatasetPreprocessorOp(
project=project,
location=location,
test_dataset=get_test_dataset_task.outputs['dataset'],
test_dataset_annotation_set_name=test_dataset_annotation_set_name,
test_dataset_storage_source_uris=test_dataset_storage_source_uris,
)
get_model_task = GetVertexModelOp(model_name=model_name)
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name='sdk-batch-predict-evaluation',
gcs_source_uris=dataset_preprocessor_task.outputs[
'batch_prediction_storage_source'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
eval_task = ModelEvaluationClassificationOp(
project=project,
location=location,
target_field_name='ground_truth',
ground_truth_format='jsonl',
ground_truth_gcs_source=dataset_preprocessor_task.outputs[
'model_evaluation_storage_source'
],
predictions_format='jsonl',
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
model=get_model_task.outputs['model'],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
prediction_score_column='',
prediction_label_column='',
)
evaluated_annotation_task = EvaluatedAnnotationOp(
project=project,
location=location,
predictions_storage_source=batch_predict_task.outputs[
'gcs_output_directory'
],
ground_truth_storage_source=dataset_preprocessor_task.outputs[
'test_data_items_storage_source'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
)
model_evaluation_importer_task = ModelImportEvaluationOp(
classification_metrics=eval_task.outputs['evaluation_metrics'],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_paths=dataset_preprocessor_task.outputs[
'batch_prediction_storage_source'
],
display_name=evaluation_display_name,
)
ModelImportEvaluatedAnnotationOp(
model=get_model_task.outputs['model'],
evaluated_annotation_output_uri=evaluated_annotation_task.outputs[
'evaluated_annotation_output_uri'
],
evaluation_importer_gcp_resources=model_evaluation_importer_task.outputs[
'gcp_resources'
],
)
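# ---------------------------------------------------------------------------
# Illustrative parameter sketch (not part of the shipped pipeline). The
# docstring above expects `model_name` and `test_dataset_resource_name` as full
# Vertex resource names; the project, region, and resource IDs below are
# hypothetical placeholders for a parameter_values mapping passed at
# submission time.
if __name__ == '__main__':
  project = 'my-project'
  location = 'us-central1'

  example_parameter_values = {
      'project': project,
      'location': location,
      'model_name': f'projects/{project}/locations/{location}/models/1234567890',
      'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/batch_predict_output',
      'test_dataset_resource_name': (
          f'projects/{project}/locations/{location}/datasets/9876543210'
      ),
      'test_dataset_annotation_set_name': 'my-annotation-set',
  }
  print(example_parameter_values)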
| 842 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_unstructure_data_pipeline.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, NamedTuple
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model import GetVertexModelOp
from google_cloud_pipeline_components._implementation.model_evaluation import TargetFieldDataRemoverOp
from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
from google_cloud_pipeline_components.types.artifact_types import RegressionMetrics
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
from google_cloud_pipeline_components.v1.model_evaluation.regression_component import model_evaluation_regression as ModelEvaluationRegressionOp
import kfp
from kfp import dsl
@kfp.dsl.pipeline(name='evaluation-classification-pipeline')
def evaluation_automl_unstructure_data_classification_pipeline( # pylint: disable=dangerous-default-value
location: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
slicing_specs: List[Any] = [], # pylint: disable=g-bare-generic
evaluation_prediction_label_column: str = '',
evaluation_prediction_score_column: str = '',
evaluation_class_labels: List[str] = [], # pylint: disable=g-bare-generic
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-unstructured-data-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
) -> NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The evaluation pipeline with ground truth and no feature attribution for classification models.
This pipeline is used for all classification unstructured AutoML models, including Text, Video, Image and Custom models.
Args:
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction. Formatted like projects/{project}/locations/{location}/models/{model} or projects/{project}/locations/{location}/models/{model}@{model_version_id_or_model_version_alias}.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See sample code in https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component For more details on configuring slices, see https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice.
evaluation_prediction_label_column: The column name of the field containing classes the model is scoring. Formatted to be able to find nested columns, delimited by `.`.
evaluation_prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
evaluation_class_labels: Required for classification prediction type. The list of class names for the target_field_name, in the same order they appear in a file in batch_predict_gcs_source_uris. For instance, if the target_field_name could be either `1` or `0`, then the class_labels input will be ["1", "0"].
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
    force_runner_mode: Indicates the runner mode to force. Valid options are `Dataflow` and `DirectRunner`.
project: The GCP project that runs the pipeline components. Defaults to the project in which the PipelineJob is run.
Returns: A Tuple of google.ClassificationMetrics artifact and the imported evaluation metrics resource name.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
)
get_model_task = GetVertexModelOp(model_name=model_name)
# Remove the ground truth from the given GCS data.
# This is required for many models as Vertex Batch Prediction can not have the
# ground truth in the data to run, but later the evaluation component requires
# the ground truth data.
target_field_data_remover_task = TargetFieldDataRemoverOp(
project=project,
location=location,
target_field_name=target_field_name,
gcs_source_uris=batch_predict_gcs_source_uris,
bigquery_source_uri=batch_predict_bigquery_source_uri,
instances_format=batch_predict_instances_format,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
)
# Run Batch Prediction.
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name=f'evaluation-batch-predict-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
gcs_source_uris=target_field_data_remover_task.outputs[
'gcs_output_directory'
],
bigquery_source_input_uri=target_field_data_remover_task.outputs[
'bigquery_output_table'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
# Run evaluation for a classification model.
eval_task = ModelEvaluationClassificationOp(
project=project,
location=location,
class_labels=evaluation_class_labels,
prediction_label_column=evaluation_prediction_label_column,
prediction_score_column=evaluation_prediction_score_column,
target_field_name=target_field_name,
ground_truth_format=batch_predict_instances_format,
ground_truth_gcs_source=batch_predict_gcs_source_uris,
ground_truth_bigquery_source=batch_predict_bigquery_source_uri,
predictions_format=batch_predict_predictions_format,
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
predictions_bigquery_source=batch_predict_task.outputs[
'bigquery_output_table'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
model=get_model_task.outputs['model'],
slicing_specs=slicing_specs,
)
# Import the evaluation result to Vertex AI.
import_evaluation_task = ModelImportEvaluationOp(
classification_metrics=eval_task.outputs['evaluation_metrics'],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_path=batch_predict_bigquery_source_uri,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
@kfp.dsl.pipeline(name='evaluation-regression-pipeline')
def evaluation_automl_unstructure_data_regression_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: list = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
evaluation_prediction_score_column: str = '',
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-unstructured-data-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
) -> NamedTuple(
'outputs',
evaluation_metrics=RegressionMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The evaluation pipeline with ground truth and no feature attribution for.
regression models.
This pipeline is used for all custom tabular regression models.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction. Formatted like projects/{project}/locations/{location}/models/{model} or projects/{project}/locations/{location}/models/{model}@{model_version_id_or_model_version_alias}.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
evaluation_prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
    force_runner_mode: Indicates the runner mode to force. Valid options are `Dataflow` and `DirectRunner`.
Returns: A Tuple of google.RegressionMetrics artifact and the imported evaluation metrics resource name.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=RegressionMetrics,
evaluation_resource_name=str,
)
get_model_task = GetVertexModelOp(model_name=model_name)
# Remove the ground truth from the given GCS data.
# This is required for many models as Vertex Batch Prediction can not have the
# ground truth in the data to run, but later the evaluation component requires
# the ground truth data.
target_field_data_remover_task = TargetFieldDataRemoverOp(
project=project,
location=location,
target_field_name=target_field_name,
gcs_source_uris=batch_predict_gcs_source_uris,
bigquery_source_uri=batch_predict_bigquery_source_uri,
instances_format=batch_predict_instances_format,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
)
# Run Batch Prediction.
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name=f'evaluation-batch-predict-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
gcs_source_uris=target_field_data_remover_task.outputs[
'gcs_output_directory'
],
bigquery_source_input_uri=target_field_data_remover_task.outputs[
'bigquery_output_table'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
# Run evaluation for a regression model.
eval_task = ModelEvaluationRegressionOp(
project=project,
location=location,
target_field_name=target_field_name,
ground_truth_format=batch_predict_instances_format,
ground_truth_gcs_source=batch_predict_gcs_source_uris,
ground_truth_bigquery_source=batch_predict_bigquery_source_uri,
prediction_score_column=evaluation_prediction_score_column,
predictions_format=batch_predict_predictions_format,
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
predictions_bigquery_source=batch_predict_task.outputs[
'bigquery_output_table'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
model=get_model_task.outputs['model'],
)
# Import the evaluation result to Vertex AI.
import_evaluation_task = ModelImportEvaluationOp(
regression_metrics=eval_task.outputs['evaluation_metrics'],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_path=batch_predict_bigquery_source_uri,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
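# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example of invoking the regression pipeline above as a
# sub-pipeline from a parent KFP pipeline and wiring its NamedTuple outputs. The
# parent pipeline, parameter values, and GCS paths below are illustrative
# assumptions, not part of this library.
#
#   @kfp.dsl.pipeline(name='parent-evaluation-pipeline')
#   def parent_pipeline(project: str, location: str, model_name: str):
#       eval_task = evaluation_automl_unstructure_data_regression_pipeline(
#           project=project,
#           location=location,
#           model_name=model_name,
#           target_field_name='price',
#           batch_predict_instances_format='jsonl',
#           batch_predict_gcs_destination_output_uri='gs://my-bucket/eval-output',
#           batch_predict_gcs_source_uris=['gs://my-bucket/instances.jsonl'],
#       )
#       # The NamedTuple fields are exposed as task outputs keyed by field name.
#       _ = eval_task.outputs['evaluation_resource_name']
# -------------------------------------------------------------------------------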
@kfp.dsl.pipeline(name='evaluation-pipeline')
def evaluation_automl_unstructure_data_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
prediction_type: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
slicing_specs: List[Any] = [], # pylint: disable=g-bare-generic
evaluation_prediction_label_column: str = '',
evaluation_prediction_score_column: str = '',
evaluation_class_labels: List[str] = [], # pylint: disable=g-bare-generic
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-unstructured-data-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
):
# fmt: off
"""The evaluation pipeline with ground truth and no feature attribution.
This pipeline is used for all unstructured AutoML models, including Text,
Video, and Image models, as well as Custom models.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
prediction_type: The type of prediction the model is to produce: "classification" or "regression".
model_name: The Vertex model resource name to be imported and used for batch prediction. Formatted like projects/{project}/locations/{location}/models/{model} or projects/{project}/locations/{location}/models/{model}@{model_version_id_or_model_version_alias}.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See sample code in https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component For more details on configuring slices, see https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice.
evaluation_prediction_label_column: The column name of the field containing classes the model is scoring. Formatted to be able to find nested columns, delimited by `.`.
evaluation_prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
evaluation_class_labels: Required for classification prediction type. The list of class names for the target_field_name, in the same order they appear in a file in batch_predict_gcs_source_uris. For instance, if the target_field_name could be either `1` or `0`, then the class_labels input will be ["1", "0"].
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Indicates the runner mode to force the evaluation components to use. Valid options are `Dataflow` and `DirectRunner`.
"""
# fmt: on
with kfp.dsl.Condition(
prediction_type == 'classification', name='classification'
):
evaluation_automl_unstructure_data_classification_pipeline(
project=project,
location=location,
model_name=model_name,
target_field_name=target_field_name,
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
slicing_specs=slicing_specs,
evaluation_prediction_label_column=evaluation_prediction_label_column,
evaluation_prediction_score_column=evaluation_prediction_score_column,
evaluation_class_labels=evaluation_class_labels,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
evaluation_display_name=evaluation_display_name,
force_runner_mode=force_runner_mode,
)
with kfp.dsl.Condition(prediction_type == 'regression', name='regression'):
evaluation_automl_unstructure_data_regression_pipeline(
project=project,
location=location,
model_name=model_name,
target_field_name=target_field_name,
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
evaluation_prediction_score_column=evaluation_prediction_score_column,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
evaluation_display_name=evaluation_display_name,
force_runner_mode=force_runner_mode,
)
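# A minimal sketch (assumption, not part of the original module): compiling the
# dispatcher pipeline above to a pipeline spec with the KFP SDK. The output file
# name is illustrative.
if __name__ == '__main__':
    from kfp import compiler

    compiler.Compiler().compile(
        pipeline_func=evaluation_automl_unstructure_data_pipeline,
        package_path='evaluation_automl_unstructure_data_pipeline.yaml',
    )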
| 843 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_tabular_pipeline.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, NamedTuple
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model import GetVertexModelOp
from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
from google_cloud_pipeline_components.types.artifact_types import RegressionMetrics
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
from google_cloud_pipeline_components.v1.model_evaluation.regression_component import model_evaluation_regression as ModelEvaluationRegressionOp
import kfp
@kfp.dsl.pipeline(name='evaluation-automl-tabular-classification-pipeline')
def evaluation_automl_tabular_classification_pipeline( # pylint: disable=dangerous-default-value
location: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
slicing_specs: List[Any] = [], # pylint: disable=g-bare-generic
evaluation_display_name: str = 'evaluation-automl-tabular-pipeline-{{$.pipeline_job_uuid}}',
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
) -> NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The evaluation AutoML tabular pipeline with no feature attribution for.
classification models.
This pipeline guarantees support for AutoML Tabular models. This pipeline does
not include the target_field_data_remover component, which is needed for many
tabular custom models.
Args:
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances to run batch prediction on. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See sample code in https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component For more details on configuring slices, see https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Indicates the runner mode to force the evaluation components to use. Valid options are `Dataflow` and `DirectRunner`.
project: The GCP project that runs the pipeline components. Defaults to the project in which the PipelineJob is run.
Returns:
A google.ClassificationMetrics artifact and imported evaluation_resource_name.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
)
# Get the Vertex AI Model.
get_model_task = GetVertexModelOp(model_name=model_name)
# Run Vertex AI Batch Prediction.
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name='evaluation-batch-predict-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
gcs_source_uris=batch_predict_gcs_source_uris,
bigquery_source_input_uri=batch_predict_bigquery_source_uri,
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
# Run evaluation for a classification model.
eval_task = ModelEvaluationClassificationOp(
project=project,
location=location,
target_field_name=target_field_name,
predictions_format=batch_predict_predictions_format,
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
predictions_bigquery_source=batch_predict_task.outputs[
'bigquery_output_table'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
model=get_model_task.outputs['model'],
slicing_specs=slicing_specs,
)
# Import the evaluation result to Vertex AI.
import_evaluation_task = ModelImportEvaluationOp(
classification_metrics=eval_task.outputs['evaluation_metrics'],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_path=batch_predict_bigquery_source_uri,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
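# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example of reading back the evaluation that
# ModelImportEvaluationOp attaches to the model once the pipeline has finished.
# The project, region, and model resource name below are illustrative
# assumptions; the calls rely on the Vertex AI SDK (google-cloud-aiplatform).
#
#   from google.cloud import aiplatform
#
#   aiplatform.init(project='my-project', location='us-central1')
#   model = aiplatform.Model(
#       'projects/my-project/locations/us-central1/models/1234567890'
#   )
#   for evaluation in model.list_model_evaluations():
#       print(evaluation.resource_name)
# -------------------------------------------------------------------------------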
@kfp.dsl.pipeline(name='evaluation-automl-tabular-regression-pipeline')
def evaluation_automl_tabular_regression_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-tabular-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
) -> NamedTuple(
'outputs',
evaluation_metrics=RegressionMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The evaluation AutoML tabular pipeline with no feature attribution for regression models.
This pipeline guarantees support for AutoML Tabular models. This pipeline does not include the target_field_data_remover component, which is needed for many tabular custom models.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances to run batch prediction on. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Indicates the runner mode to force the evaluation components to use. Valid options are `Dataflow` and `DirectRunner`.
Returns:
A google.RegressionMetrics artifact and imported evaluation_resource_name.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=RegressionMetrics,
evaluation_resource_name=str,
)
# Get the Vertex AI Model.
get_model_task = GetVertexModelOp(model_name=model_name)
# Run Vertex AI Batch Prediction.
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name='evaluation-batch-predict-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
gcs_source_uris=batch_predict_gcs_source_uris,
bigquery_source_input_uri=batch_predict_bigquery_source_uri,
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
# Run evaluation for a regression model.
eval_task = ModelEvaluationRegressionOp(
project=project,
location=location,
target_field_name=target_field_name,
predictions_format=batch_predict_predictions_format,
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
predictions_bigquery_source=batch_predict_task.outputs[
'bigquery_output_table'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
model=get_model_task.outputs['model'],
)
# Import the evaluation result to Vertex AI.
import_evaluation_task = ModelImportEvaluationOp(
regression_metrics=eval_task.outputs['evaluation_metrics'],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_path=batch_predict_bigquery_source_uri,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
@kfp.dsl.pipeline(name='evaluation-automl-tabular-pipeline')
def evaluation_automl_tabular_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
prediction_type: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
slicing_specs: List[Any] = [], # pylint: disable=g-bare-generic
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-tabular-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
):
# fmt: off
"""The evaluation AutoML tabular pipeline with no feature attribution.
This pipeline guarantees support for AutoML Tabular classification and regression models. This pipeline does not include the target_field_data_remover component, which is needed for many tabular custom models and AutoML Tabular Forecasting.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
prediction_type: The type of prediction the model is to produce: "classification" or "regression".
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances to run batch prediction on. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See sample code in https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component For more details on configuring slices, see https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Indicates the runner mode to force the evaluation components to use. Valid options are `Dataflow` and `DirectRunner`.
"""
# fmt: on
with kfp.dsl.Condition(
prediction_type == 'classification', name='classification'
):
evaluation_automl_tabular_classification_pipeline(
project=project,
location=location,
model_name=model_name,
target_field_name=target_field_name,
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
slicing_specs=slicing_specs,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
evaluation_display_name=evaluation_display_name,
force_runner_mode=force_runner_mode,
)
with kfp.dsl.Condition(prediction_type == 'regression', name='regression'):
evaluation_automl_tabular_regression_pipeline(
project=project,
location=location,
model_name=model_name,
target_field_name=target_field_name,
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
evaluation_display_name=evaluation_display_name,
force_runner_mode=force_runner_mode,
)
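# A minimal sketch (assumption, not part of the original module): compiling the
# dispatcher pipeline above and submitting it as a Vertex AI PipelineJob. The
# project, region, bucket, model resource name, and parameter values below are
# illustrative placeholders; `prediction_type` selects which Condition branch
# runs at execution time.
if __name__ == '__main__':
    from google.cloud import aiplatform
    from kfp import compiler

    compiler.Compiler().compile(
        pipeline_func=evaluation_automl_tabular_pipeline,
        package_path='evaluation_automl_tabular_pipeline.yaml',
    )
    aiplatform.init(project='my-project', location='us-central1')
    job = aiplatform.PipelineJob(
        display_name='evaluation-automl-tabular',
        template_path='evaluation_automl_tabular_pipeline.yaml',
        parameter_values={
            'project': 'my-project',
            'location': 'us-central1',
            'prediction_type': 'regression',
            'model_name': (
                'projects/my-project/locations/us-central1/models/1234567890'
            ),
            'target_field_name': 'price',
            'batch_predict_instances_format': 'jsonl',
            'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/eval-output',
            'batch_predict_gcs_source_uris': ['gs://my-bucket/instances.jsonl'],
        },
    )
    job.submit()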
| 844 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/evaluation_feature_attribution_pipeline.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, NamedTuple
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model import GetVertexModelOp
from google_cloud_pipeline_components._implementation.model_evaluation import FeatureAttributionGraphComponentOp
from google_cloud_pipeline_components._implementation.model_evaluation import TargetFieldDataRemoverOp
from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
from google_cloud_pipeline_components.types.artifact_types import RegressionMetrics
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
from google_cloud_pipeline_components.v1.model_evaluation.regression_component import model_evaluation_regression as ModelEvaluationRegressionOp
import kfp
@kfp.dsl.pipeline(name='evaluation-feature-attribution-classification-pipeline')
def evaluation_feature_attribution_classification_pipeline( # pylint: disable=dangerous-default-value
location: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_explanation_metadata: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_parameters: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_data_sample_size: int = 10000,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
slicing_specs: List[Any] = [], # pylint: disable=g-bare-generic
evaluation_prediction_label_column: str = '',
evaluation_prediction_score_column: str = '',
evaluation_class_labels: List[str] = [], # pylint: disable=g-bare-generic
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-feature-attribution-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
) -> NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The evaluation custom tabular pipeline with feature attribution for classification models.
This pipeline supports custom models that contain a valid explanation_spec. This pipeline includes the target_field_data_remover component, which is needed for many tabular custom models.
Args:
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>`, where `<model-display-name>` is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_explanation_metadata: Explanation metadata configuration for this BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_metadata`. All fields of `explanation_metadata` are optional in the request. If a field of the `explanation_metadata` object is not populated, the corresponding field of the `Model.explanation_metadata` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.
batch_predict_explanation_parameters: Parameters to configure explaining for Model's predictions. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_parameters`. All fields of `explanation_parameters` are optional in the request. If a field of the `explanation_parameters` object is not populated, the corresponding field of the `Model.explanation_parameters` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.
batch_predict_explanation_data_sample_size: Desired size to downsample the input dataset that will then be used for batch explanation.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See [sample code](https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component) and more details on [configuring slices](https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice).
evaluation_prediction_label_column: The column name of the field containing classes the model is scoring. Formatted to be able to find nested columns, delimited by `.`.
evaluation_prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
evaluation_class_labels: Required for classification prediction type. The list of class names for the target_field_name, in the same order they appear in a file in batch_predict_gcs_source_uris. For instance, if the target_field_name could be either `1` or `0`, then the class_labels input will be ["1", "0"].
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Forces the evaluation components to use a specific runner mode. Valid options are `Dataflow` and `DirectRunner`.
project: The GCP project that runs the pipeline components. Defaults to the project in which the PipelineJob is run.
Returns:
A google.ClassificationMetrics artifact.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
)
get_model_task = GetVertexModelOp(model_name=model_name)
# Remove the ground truth from the given GCS or BQ data.
# This is required for many models as Vertex Batch Prediction can not have the
# ground truth in the data to run, but later the evaluation component requires
# the ground truth data.
target_field_data_remover_task = TargetFieldDataRemoverOp(
project=project,
location=location,
target_field_name=target_field_name,
gcs_source_uris=batch_predict_gcs_source_uris,
bigquery_source_uri=batch_predict_bigquery_source_uri,
instances_format=batch_predict_instances_format,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
)
# Run Batch Prediction.
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name='model-registry-batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
gcs_source_uris=target_field_data_remover_task.outputs[
'gcs_output_directory'
],
bigquery_source_input_uri=target_field_data_remover_task.outputs[
'bigquery_output_table'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
# Run feature attribution steps.
feature_attribution_graph = FeatureAttributionGraphComponentOp(
project=project,
location=location,
prediction_type='classification',
vertex_model=get_model_task.outputs['model'],
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=target_field_data_remover_task.outputs[
'gcs_output_directory'
],
batch_predict_bigquery_source_uri=target_field_data_remover_task.outputs[
'bigquery_output_table'
],
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_explanation_metadata=batch_predict_explanation_metadata,
batch_predict_explanation_parameters=batch_predict_explanation_parameters,
batch_predict_explanation_data_sample_size=batch_predict_explanation_data_sample_size,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
)
# Run evaluation for a classification model.
eval_task = ModelEvaluationClassificationOp(
project=project,
location=location,
class_labels=evaluation_class_labels,
prediction_label_column=evaluation_prediction_label_column,
prediction_score_column=evaluation_prediction_score_column,
target_field_name=target_field_name,
ground_truth_format=batch_predict_instances_format,
ground_truth_gcs_source=batch_predict_gcs_source_uris,
ground_truth_bigquery_source=batch_predict_bigquery_source_uri,
predictions_format=batch_predict_predictions_format,
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
predictions_bigquery_source=batch_predict_task.outputs[
'bigquery_output_table'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
model=get_model_task.outputs['model'],
slicing_specs=slicing_specs,
)
# Import the evaluation result to Vertex AI.
import_evaluation_task = ModelImportEvaluationOp(
classification_metrics=eval_task.outputs['evaluation_metrics'],
feature_attributions=feature_attribution_graph.outputs[
'feature_attributions'
],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_path=batch_predict_bigquery_source_uri,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
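# Example (illustrative sketch only, not executed as part of this module): the
# classification pipeline above is a regular KFP pipeline function and can be
# compiled to a pipeline spec with the KFP SDK. The output filename is an
# arbitrary placeholder.
#
#   from kfp import compiler
#
#   compiler.Compiler().compile(
#       pipeline_func=evaluation_feature_attribution_classification_pipeline,
#       package_path='evaluation_feature_attribution_classification_pipeline.json',
#   )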
@kfp.dsl.pipeline(name='evaluation-feature-attribution-regression-pipeline')
def evaluation_feature_attribution_regression_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_explanation_metadata: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_parameters: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_data_sample_size: int = 10000,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
evaluation_prediction_score_column: str = '',
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-feature-attribution-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
) -> NamedTuple(
'outputs',
evaluation_metrics=RegressionMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The evaluation custom tabular pipeline with feature attribution for.
regression models.
This pipeline gives support for custom models that contain a
valid explanation_spec. This pipeline includes the target_field_data_remover
component, which is needed for many tabular custom models.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>`, where `<model-display-name>` is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_explanation_metadata: Explanation metadata configuration for this BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_metadata`. All fields of `explanation_metadata` are optional in the request. If a field of the `explanation_metadata` object is not populated, the corresponding field of the `Model.explanation_metadata` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.
batch_predict_explanation_parameters: Parameters to configure explaining for Model's predictions. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_parameters`. All fields of `explanation_parameters` are optional in the request. If a field of the `explanation_parameters` object is not populated, the corresponding field of the `Model.explanation_parameters` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.
batch_predict_explanation_data_sample_size: Desired size to downsample the input dataset that will then be used for batch explanation.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
evaluation_prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Forces the evaluation components to use a specific runner mode. Valid options are `Dataflow` and `DirectRunner`.
Returns:
A google.RegressionMetrics artifact.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=RegressionMetrics,
evaluation_resource_name=str,
)
get_model_task = GetVertexModelOp(model_name=model_name)
# Remove the ground truth from the given GCS or BQ data.
# This is required for many models as Vertex Batch Prediction can not have the
# ground truth in the data to run, but later the evaluation component requires
# the ground truth data.
target_field_data_remover_task = TargetFieldDataRemoverOp(
project=project,
location=location,
target_field_name=target_field_name,
gcs_source_uris=batch_predict_gcs_source_uris,
bigquery_source_uri=batch_predict_bigquery_source_uri,
instances_format=batch_predict_instances_format,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
)
# Run Batch Prediction.
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name='model-registry-batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
gcs_source_uris=target_field_data_remover_task.outputs[
'gcs_output_directory'
],
bigquery_source_input_uri=target_field_data_remover_task.outputs[
'bigquery_output_table'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
# Run feature attribution steps.
feature_attribution_graph = FeatureAttributionGraphComponentOp(
project=project,
location=location,
prediction_type='regression',
vertex_model=get_model_task.outputs['model'],
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=target_field_data_remover_task.outputs[
'gcs_output_directory'
],
batch_predict_bigquery_source_uri=target_field_data_remover_task.outputs[
'bigquery_output_table'
],
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_explanation_metadata=batch_predict_explanation_metadata,
batch_predict_explanation_parameters=batch_predict_explanation_parameters,
batch_predict_explanation_data_sample_size=batch_predict_explanation_data_sample_size,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
)
# Run evaluation for a regression model.
eval_task = ModelEvaluationRegressionOp(
project=project,
location=location,
target_field_name=target_field_name,
ground_truth_format=batch_predict_instances_format,
ground_truth_gcs_source=batch_predict_gcs_source_uris,
ground_truth_bigquery_source=batch_predict_bigquery_source_uri,
prediction_score_column=evaluation_prediction_score_column,
predictions_format=batch_predict_predictions_format,
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
predictions_bigquery_source=batch_predict_task.outputs[
'bigquery_output_table'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
model=get_model_task.outputs['model'],
)
# Import the evaluation result to Vertex AI.
import_evaluation_task = ModelImportEvaluationOp(
regression_metrics=eval_task.outputs['evaluation_metrics'],
feature_attributions=feature_attribution_graph.outputs[
'feature_attributions'
],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_path=batch_predict_bigquery_source_uri,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
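# Example (sketch only): the regression pipeline above can be run with just its
# required arguments; the remaining inputs fall back to the defaults declared in
# the signature. All values below are placeholders.
#
#   regression_eval_parameter_values = {
#       'project': 'my-project',
#       'location': 'us-central1',
#       'model_name': 'projects/my-project/locations/us-central1/models/1234567890',
#       'target_field_name': 'price',
#       'batch_predict_instances_format': 'jsonl',
#       'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/eval-output',
#       'batch_predict_gcs_source_uris': ['gs://my-bucket/eval-data/*.jsonl'],
#   }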
@kfp.dsl.pipeline(name='evaluation-feature-attribution-pipeline')
def evaluation_feature_attribution_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
prediction_type: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_explanation_metadata: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_parameters: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_data_sample_size: int = 10000,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
slicing_specs: List[Any] = [], # pylint: disable=g-bare-generic
evaluation_prediction_label_column: str = '',
evaluation_prediction_score_column: str = '',
evaluation_class_labels: List[str] = [], # pylint: disable=g-bare-generic
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-feature-attribution-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
):
# fmt: off
"""The evaluation custom tabular pipeline with feature attribution.
This pipeline gives support for custom models that contain a
valid explanation_spec. This pipeline includes the target_field_data_remover
component, which is needed for many tabular custom models.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
prediction_type: The type of prediction the model is to produce. "classification" or "regression".
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>`, where `<model-display-name>` is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_explanation_metadata: Explanation metadata configuration for this BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_metadata`. All fields of `explanation_metadata` are optional in the request. If a field of the `explanation_metadata` object is not populated, the corresponding field of the `Model.explanation_metadata` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.
batch_predict_explanation_parameters: Parameters to configure explaining for Model's predictions. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_parameters`. All fields of `explanation_parameters` are optional in the request. If a field of the `explanation_parameters` object is not populated, the corresponding field of the `Model.explanation_parameters` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.
batch_predict_explanation_data_sample_size: Desired size to downsample the input dataset that will then be used for batch explanation.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See [sample code](https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component) and more details on [configuring slices](https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice).
evaluation_prediction_label_column: The column name of the field containing classes the model is scoring. Formatted to be able to find nested columns, delimited by `.`.
evaluation_prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
evaluation_class_labels: Required for classification prediction type. The list of class names for the target_field_name, in the same order they appear in a file in batch_predict_gcs_source_uris. For instance, if the target_field_name could be either `1` or `0`, then the class_labels input will be ["1", "0"].
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Forces the evaluation components to use a specific runner mode. Valid options are `Dataflow` and `DirectRunner`.
"""
# fmt: on
with kfp.dsl.Condition(
prediction_type == 'classification', name='classification'
):
evaluation_feature_attribution_classification_pipeline(
project=project,
location=location,
model_name=model_name,
target_field_name=target_field_name,
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_explanation_metadata=batch_predict_explanation_metadata,
batch_predict_explanation_parameters=batch_predict_explanation_parameters,
batch_predict_explanation_data_sample_size=batch_predict_explanation_data_sample_size,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
slicing_specs=slicing_specs,
evaluation_prediction_label_column=evaluation_prediction_label_column,
evaluation_prediction_score_column=evaluation_prediction_score_column,
evaluation_class_labels=evaluation_class_labels,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
evaluation_display_name=evaluation_display_name,
force_runner_mode=force_runner_mode,
)
with kfp.dsl.Condition(prediction_type == 'regression', name='regression'):
evaluation_feature_attribution_regression_pipeline(
project=project,
location=location,
model_name=model_name,
target_field_name=target_field_name,
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_explanation_metadata=batch_predict_explanation_metadata,
batch_predict_explanation_parameters=batch_predict_explanation_parameters,
batch_predict_explanation_data_sample_size=batch_predict_explanation_data_sample_size,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
evaluation_prediction_score_column=evaluation_prediction_score_column,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
evaluation_display_name=evaluation_display_name,
force_runner_mode=force_runner_mode,
)
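# Example (illustrative sketch, not executed as part of this module): one way to
# launch the wrapper pipeline above on Vertex AI Pipelines with the
# google-cloud-aiplatform SDK. It assumes the wrapper was compiled to
# 'evaluation_feature_attribution_pipeline.json' in the same way as the compile
# sketch shown after the classification pipeline; the project, bucket, model,
# and label values are placeholders.
#
#   from google.cloud import aiplatform
#
#   aiplatform.init(project='my-project', location='us-central1')
#   aiplatform.PipelineJob(
#       display_name='custom-model-feature-attribution-eval',
#       template_path='evaluation_feature_attribution_pipeline.json',
#       parameter_values={
#           'project': 'my-project',
#           'location': 'us-central1',
#           'prediction_type': 'classification',  # routes to the classification branch above
#           'model_name': 'projects/my-project/locations/us-central1/models/1234567890',
#           'target_field_name': 'species',
#           'batch_predict_instances_format': 'jsonl',
#           'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/eval-output',
#           'batch_predict_gcs_source_uris': ['gs://my-bucket/eval-data/*.jsonl'],
#           'evaluation_class_labels': ['setosa', 'versicolor', 'virginica'],
#       },
#   ).submit()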
| 845 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model evaluation pipelines."""
from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
from google_cloud_pipeline_components.v1.model_evaluation.error_analysis_pipeline import vision_model_error_analysis_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.evaluated_annotation_pipeline import evaluated_annotation_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.evaluation_automl_tabular_feature_attribution_pipeline import evaluation_automl_tabular_feature_attribution_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.evaluation_automl_tabular_pipeline import evaluation_automl_tabular_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.evaluation_automl_unstructure_data_pipeline import evaluation_automl_unstructure_data_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.evaluation_feature_attribution_pipeline import evaluation_feature_attribution_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_classification_pipeline import evaluation_llm_classification_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.evaluation_llm_text_generation_pipeline import evaluation_llm_text_generation_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.forecasting_component import model_evaluation_forecasting as ModelEvaluationForecastingOp
from google_cloud_pipeline_components.v1.model_evaluation.model_based_llm_evaluation.autosxs.autosxs_pipeline import autosxs_pipeline
from google_cloud_pipeline_components.v1.model_evaluation.regression_component import model_evaluation_regression as ModelEvaluationRegressionOp
__all__ = [
'autosxs_pipeline',
'evaluated_annotation_pipeline',
'evaluation_automl_tabular_feature_attribution_pipeline',
'evaluation_automl_tabular_pipeline',
'evaluation_automl_unstructure_data_pipeline',
'evaluation_feature_attribution_pipeline',
'evaluation_llm_classification_pipeline',
'evaluation_llm_text_generation_pipeline',
'vision_model_error_analysis_pipeline',
'ModelEvaluationClassificationOp',
'ModelEvaluationRegressionOp',
'ModelEvaluationForecastingOp',
]
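# Example (illustrative only): the exports above are the public surface of this
# module; a user pipeline would typically import them as shown below. The names
# come from `__all__`; nothing here is executed at import time.
#
#   from google_cloud_pipeline_components.v1 import model_evaluation
#
#   # A pipeline function that can be compiled or submitted to Vertex AI Pipelines:
#   pipeline_func = model_evaluation.evaluation_feature_attribution_pipeline
#
#   # A component that can be called inside a custom KFP pipeline definition:
#   classification_eval_op = model_evaluation.ModelEvaluationClassificationOp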
| 846 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/evaluation_llm_text_generation_pipeline.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vertex Gen AI Evaluation for Text Generation/QA/Summarization tasks."""
from typing import Dict, List, NamedTuple
from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationPreprocessorOp
from google_cloud_pipeline_components._implementation.model_evaluation import LLMEvaluationTextGenerationOp
from google_cloud_pipeline_components._implementation.model_evaluation import ModelNamePreprocessorOp
from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
from kfp import dsl
# pylint: disable=unused-argument, unexpected-keyword-arg
_PIPELINE_NAME = 'evaluation-llm-text-generation-pipeline'
@dsl.pipeline(name=_PIPELINE_NAME)
def evaluation_llm_text_generation_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
batch_predict_gcs_source_uris: List[str],
batch_predict_gcs_destination_output_uri: str,
model_name: str = 'publishers/google/models/text-bison@002',
evaluation_task: str = 'text-generation',
role_field_name: str = 'role',
input_field_name: str = 'input_text',
target_field_name: str = 'output_text',
batch_predict_instances_format: str = 'jsonl',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_model_parameters: Dict[str, str] = {},
enable_row_based_metrics: bool = False,
machine_type: str = 'e2-standard-4',
service_account: str = '',
network: str = '',
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-llm-text-generation-pipeline-{{$.pipeline_job_uuid}}',
) -> NamedTuple(
'outputs', evaluation_metrics=dsl.Metrics, evaluation_resource_name=str
):
# fmt: off
"""LLM Text Generation Evaluation pipeline.
This pipeline supports evaluating large language models, whether publisher or managed
models, on the following generative tasks: `summarization`, `question-answering`, and `text-generation`.
Args:
project: Required. The GCP project that runs the pipeline components.
location: Required. The GCP region that runs the pipeline components.
batch_predict_gcs_source_uris: Required. Google Cloud Storage URI(s) to your eval dataset instances data to run batch prediction on. The instances data should also contain the ground truth (target) data, used for evaluation. May contain wildcards. For more information on [wildcards](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames). For more details about this [input config](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig). The content of gcs source files should be preset to one of the following formats:
1) Prediction & Evaluation Dataset format, guaranteeing "prompt" and "ground_truth" attributes are included
{
"prompt": "your input/prompt text",
"ground_truth": "your ground truth output text"
}
or
2) Tuning Dataset format, guaranteeing "input_text" and "output_text" attributes are included.
{
"input_text": "your input/prompt text",
"output_text": "your ground truth output text"
}
batch_predict_gcs_destination_output_uri: Required. The Google Cloud Storage location of the directory where the eval pipeline output is to be written to.
model_name: The Model name used to run evaluation. Must be a publisher Model or a managed Model sharing the same ancestor location. Starting this job has no impact on any existing deployments of the Model and their resources.
evaluation_task: The task that the large language model will be evaluated on. The evaluation component computes a set of metrics relevant to that specific task. Currently supported tasks are: `summarization`, `question-answering`, `text-generation`.
role_field_name: The field name of the role for input eval dataset instances that contains the input prompts to the LLM.
input_field_name: The field name of the input eval dataset instances that contains the input prompts to the LLM.
target_field_name: The field name of the eval dataset instance that contains an example reference text response. Alternatively referred to as the ground truth (or ground_truth_column) field. If not set, defaults to `output_text`.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. Only "jsonl" is currently supported. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. Only "jsonl" is currently supported. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_model_parameters: A map of parameters that govern the predictions. Some acceptable parameters include: maxOutputTokens, topK, topP, and temperature.
enable_row_based_metrics: Whether row-based metrics are computed and imported alongside the aggregate metrics. Defaults to false.
machine_type: The machine type of this custom job. If not set, defaults to `e2-standard-4`. More details: https://cloud.google.com/compute/docs/machine-resource
service_account: Sets the default service account for workload run-as account. The service account running the pipeline (https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account) submitting jobs must have act-as permission on this run-as account. If unspecified, the Vertex AI Custom Code Service Agent (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used.
network: The full name of the Compute Engine network to which the job should be peered. For example, `projects/12345/global/networks/myVPC`. Format is of the form `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is a network name, as in `myVPC`. To specify this field, you must have already configured VPC Network Peering for Vertex AI (https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If left unspecified, the job is not peered with any network.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
Returns:
evaluation_metrics: Metrics Artifact for LLM Text Generation.
evaluation_resource_name: If run on a user's managed VertexModel, the imported evaluation resource name. Empty if run on a publisher model.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=dsl.Metrics,
evaluation_resource_name=str,
)
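# Preprocess the model name so it can be resolved and imported as a VertexModel artifact below.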
preprocessed_model_name = ModelNamePreprocessorOp(
project=project,
location=location,
model_name=model_name,
service_account=service_account,
)
get_vertex_model_task = dsl.importer(
artifact_uri=(
f'https://{location}-aiplatform.googleapis.com/v1/{preprocessed_model_name.outputs["processed_model_name"]}'
),
artifact_class=VertexModel,
metadata={
'resourceName': preprocessed_model_name.outputs[
'processed_model_name'
]
},
)
get_vertex_model_task.set_display_name('get-vertex-model')
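# Preprocess the eval dataset so its instances use the fields expected by batch prediction.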
eval_dataset_preprocessor_task = LLMEvaluationPreprocessorOp(
project=project,
location=location,
gcs_source_uris=batch_predict_gcs_source_uris,
input_field_name=input_field_name,
role_field_name=role_field_name,
target_field_name=target_field_name,
model_name=model_name,
machine_type=machine_type,
service_account=service_account,
network=network,
encryption_spec_key_name=encryption_spec_key_name,
)
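# Run batch prediction on the preprocessed eval dataset.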
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_vertex_model_task.outputs['artifact'],
job_display_name='evaluation-batch-predict-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
gcs_source_uris=eval_dataset_preprocessor_task.outputs[
'preprocessed_gcs_source_uris'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
model_parameters=batch_predict_model_parameters,
encryption_spec_key_name=encryption_spec_key_name,
)
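# Compute text generation evaluation metrics (and, optionally, row-based metrics) from the batch prediction output.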
eval_task = LLMEvaluationTextGenerationOp(
project=project,
location=location,
model_name=model_name,
evaluation_task=evaluation_task,
target_field_name=target_field_name,
predictions_format=batch_predict_predictions_format,
enable_row_based_metrics=enable_row_based_metrics,
joined_predictions_gcs_source=batch_predict_task.outputs[
'gcs_output_directory'
],
machine_type=machine_type,
service_account=service_account,
network=network,
encryption_spec_key_name=encryption_spec_key_name,
)
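# Import the model by its original resource name; the evaluation below is attached to this model.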
get_vertex_eval_model_task = dsl.importer(
artifact_uri=(
f'https://{location}-aiplatform.googleapis.com/v1/{model_name}'
),
artifact_class=VertexModel,
metadata={'resourceName': model_name},
)
get_vertex_eval_model_task.set_display_name('get-vertex-eval-model')
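# Import the evaluation into Vertex AI; which branch runs depends on whether row-based metrics are enabled.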
with dsl.If(enable_row_based_metrics == True):
import_evaluation_task_with_row_based_metrics = ModelImportEvaluationOp(
metrics=eval_task.outputs['evaluation_metrics'],
row_based_metrics=eval_task.outputs['row_based_metrics'],
model=get_vertex_eval_model_task.outputs['artifact'],
problem_type=evaluation_task,
dataset_type=batch_predict_predictions_format,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
with dsl.Else():
import_evaluation_task = ModelImportEvaluationOp(
metrics=eval_task.outputs['evaluation_metrics'],
model=get_vertex_eval_model_task.outputs['artifact'],
problem_type=evaluation_task,
dataset_type=batch_predict_predictions_format,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
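# Resolve the evaluation resource name from whichever import branch actually executed.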
oneof = dsl.OneOf(
import_evaluation_task_with_row_based_metrics.outputs[
'evaluation_resource_name'
],
import_evaluation_task.outputs['evaluation_resource_name'],
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=oneof,
)
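# Example (sketch only): minimal parameter values for launching this pipeline.
# The project ID and GCS paths are placeholders; the remaining inputs use the
# defaults declared in the signature (text-bison@002, jsonl formats, etc.), and
# the model parameters shown are among those the docstring lists as acceptable.
#
#   llm_text_generation_eval_parameter_values = {
#       'project': 'my-project',
#       'location': 'us-central1',
#       'batch_predict_gcs_source_uris': ['gs://my-bucket/eval/dataset.jsonl'],
#       'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/eval/output',
#       'evaluation_task': 'summarization',
#       'batch_predict_model_parameters': {'maxOutputTokens': 256, 'temperature': 0.2},
#   }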
| 847 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/forecasting_component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model_evaluation import version
from google_cloud_pipeline_components.types.artifact_types import BQTable
from google_cloud_pipeline_components.types.artifact_types import ForecastingMetrics
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import container_component
@container_component
def model_evaluation_forecasting(
gcp_resources: dsl.OutputPath(str),
evaluation_metrics: dsl.Output[ForecastingMetrics],
target_field_name: str,
model: dsl.Input[VertexModel] = None,
location: str = 'us-central1',
predictions_format: str = 'jsonl',
predictions_gcs_source: dsl.Input[dsl.Artifact] = None,
predictions_bigquery_source: dsl.Input[BQTable] = None,
ground_truth_format: str = 'jsonl',
ground_truth_gcs_source: List[str] = [],
ground_truth_bigquery_source: str = '',
forecasting_type: str = 'point',
forecasting_quantiles: List[float] = [],
point_evaluation_quantile: float = 0.5,
prediction_score_column: str = 'prediction.value',
dataflow_service_account: str = '',
dataflow_disk_size_gb: int = 50,
dataflow_machine_type: str = 'n1-standard-4',
dataflow_workers_num: int = 1,
dataflow_max_workers_num: int = 5,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Computes a `google.ForecastingMetrics` Artifact, containing evaluation
metrics given a model's prediction results.
Creates a Dataflow job with Apache Beam and TFMA to compute evaluation
metrics.
Supports point forecasting and quantile forecasting for tabular data.
Args:
location: Location for running the evaluation.
predictions_format: The file format for the batch prediction results. `jsonl`, `csv`, and `bigquery` are the allowed formats, from Vertex Batch Prediction.
predictions_gcs_source: An artifact with its URI pointing toward a GCS directory with prediction or explanation files to be used for this evaluation. For prediction results, the files should be named "prediction.results-*". For explanation results, the files should be named "explanation.results-*".
predictions_bigquery_source: BigQuery table with prediction or explanation data to be used for this evaluation. For prediction results, the table column should be named "predicted_*".
ground_truth_format: Required for custom tabular and non tabular data. The file format for the ground truth files. `jsonl`, `csv`, and `bigquery` are the allowed formats.
ground_truth_gcs_source: Required for custom tabular and non tabular data. The GCS URIs representing where the ground truth is located. Used to provide ground truth for each prediction instance when they are not part of the batch prediction jobs prediction instance.
ground_truth_bigquery_source: Required for custom tabular. The BigQuery table URI representing where the ground truth is located. Used to provide ground truth for each prediction instance when they are not part of the batch prediction jobs prediction instance.
forecasting_type: The forecasting type being addressed by this evaluation run. `point` and `quantile` are the supported types.
forecasting_quantiles: Required for a `quantile` forecasting_type. The list of quantiles in the same order appeared in the quantile prediction score column.
point_evaluation_quantile: Required for a `quantile` forecasting_type. A quantile in the list of forecasting_quantiles that will be used for point evaluation metrics.
target_field_name: The full name path of the features target field in the predictions file. Formatted to be able to find nested columns, delimited by `.`. Alternatively referred to as the ground truth (or ground_truth_column) field.
model: The Vertex model used for evaluation. Must be located in the same region as the location argument. It is used to set the default configurations for AutoML and custom-trained models.
prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
    dataflow_service_account: Service account to run the Dataflow job. If not set, Dataflow will use the default worker service account. For more details, see https://cloud.google.com/dataflow/docs/concepts/security-and-permissions#default_worker_service_account
dataflow_disk_size_gb: The disk size (in GB) of the machine executing the evaluation run.
dataflow_machine_type: The machine type executing the evaluation run.
dataflow_workers_num: The number of workers executing the evaluation run.
dataflow_max_workers_num: The max number of workers executing the evaluation run.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
force_runner_mode: Flag to choose Beam runner. Valid options are `DirectRunner` and `Dataflow`.
project: Project to run evaluation container. Defaults to the project in which the PipelineJob is run.
Returns:
evaluation_metrics: `google.ForecastingMetrics` representing the forecasting evaluation metrics in GCS.
gcp_resources: Serialized gcp_resources proto tracking the Dataflow job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
  # fmt: on
return dsl.ContainerSpec(
image=version.EVAL_IMAGE_TAG,
command=[
'python3',
'/main.py',
],
args=[
'--setup_file',
'/setup.py',
'--json_mode',
'true',
'--project_id',
project,
'--location',
location,
'--problem_type',
'forecasting',
'--forecasting_type',
forecasting_type,
'--forecasting_quantiles',
forecasting_quantiles,
'--point_evaluation_quantile',
point_evaluation_quantile,
'--target_field_name',
dsl.ConcatPlaceholder(['instance.', target_field_name]),
'--batch_prediction_format',
predictions_format,
dsl.IfPresentPlaceholder(
input_name='predictions_gcs_source',
then=[
'--batch_prediction_gcs_source',
predictions_gcs_source.uri,
],
),
dsl.IfPresentPlaceholder(
input_name='predictions_bigquery_source',
then=[
'--batch_prediction_bigquery_source',
dsl.ConcatPlaceholder([
'bq://',
predictions_bigquery_source.metadata['projectId'],
'.',
predictions_bigquery_source.metadata['datasetId'],
'.',
predictions_bigquery_source.metadata['tableId'],
]),
],
),
dsl.IfPresentPlaceholder(
input_name='model',
then=[
'--model_name',
model.metadata['resourceName'],
],
),
'--ground_truth_format',
ground_truth_format,
'--ground_truth_gcs_source',
ground_truth_gcs_source,
'--ground_truth_bigquery_source',
ground_truth_bigquery_source,
'--root_dir',
f'{dsl.PIPELINE_ROOT_PLACEHOLDER}/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
'--prediction_score_column',
prediction_score_column,
'--dataflow_job_prefix',
f'evaluation-forecasting-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
'--dataflow_service_account',
dataflow_service_account,
'--dataflow_disk_size',
dataflow_disk_size_gb,
'--dataflow_machine_type',
dataflow_machine_type,
'--dataflow_workers_num',
dataflow_workers_num,
'--dataflow_max_workers_num',
dataflow_max_workers_num,
'--dataflow_subnetwork',
dataflow_subnetwork,
'--dataflow_use_public_ips',
dataflow_use_public_ips,
'--kms_key_name',
encryption_spec_key_name,
'--force_runner_mode',
force_runner_mode,
'--output_metrics_gcs_path',
evaluation_metrics.path,
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
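# --- Illustrative usage sketch added by the editor; not part of the original
# component. It shows one plausible way to wire Vertex Batch Prediction output
# into `model_evaluation_forecasting` inside a KFP pipeline, mirroring the
# importer pattern used elsewhere in this package. The project, bucket paths,
# model resource name, and target field below are placeholder assumptions.
from google_cloud_pipeline_components.types.artifact_types import VertexModel  # example-only import
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp  # example-only import
@dsl.pipeline(name='forecasting-evaluation-example')
def forecasting_evaluation_example(
    project: str = 'my-project',  # assumption
    location: str = 'us-central1',
    model_name: str = 'projects/my-project/locations/us-central1/models/123',  # assumption
):
  # Import the existing Vertex model as an artifact by resource name.
  get_model = dsl.importer(
      artifact_uri=f'https://{location}-aiplatform.googleapis.com/v1/{model_name}',
      artifact_class=VertexModel,
      metadata={'resourceName': model_name},
  )
  # Run batch prediction; its GCS output directory feeds the evaluation step.
  batch_predict = ModelBatchPredictOp(
      project=project,
      location=location,
      model=get_model.outputs['artifact'],
      job_display_name='forecasting-evaluation-batch-predict',
      gcs_source_uris=['gs://my-bucket/forecast_instances.jsonl'],  # assumption
      instances_format='jsonl',
      predictions_format='jsonl',
      gcs_destination_output_uri_prefix='gs://my-bucket/predictions',  # assumption
  )
  # Compute point-forecast metrics from the batch prediction results.
  model_evaluation_forecasting(
      project=project,
      location=location,
      target_field_name='sales',  # assumption
      predictions_format='jsonl',
      predictions_gcs_source=batch_predict.outputs['gcs_output_directory'],
      model=get_model.outputs['artifact'],
      forecasting_type='point',
  )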
| 848 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/regression_component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model_evaluation import version
from google_cloud_pipeline_components.types.artifact_types import BQTable
from google_cloud_pipeline_components.types.artifact_types import RegressionMetrics
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import container_component
@container_component
def model_evaluation_regression(
gcp_resources: dsl.OutputPath(str),
evaluation_metrics: dsl.Output[RegressionMetrics],
target_field_name: str,
model: dsl.Input[VertexModel] = None,
location: str = 'us-central1',
predictions_format: str = 'jsonl',
predictions_gcs_source: dsl.Input[dsl.Artifact] = None,
predictions_bigquery_source: dsl.Input[BQTable] = None,
ground_truth_format: str = 'jsonl',
ground_truth_gcs_source: List[str] = [],
ground_truth_bigquery_source: str = '',
prediction_score_column: str = 'prediction.value',
dataflow_service_account: str = '',
dataflow_disk_size_gb: int = 50,
dataflow_machine_type: str = 'n1-standard-4',
dataflow_workers_num: int = 1,
dataflow_max_workers_num: int = 5,
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Computes a `google.RegressionMetrics` Artifact, containing evaluation
metrics given a model's prediction results.
Creates a Dataflow job with Apache Beam and TFMA to compute evaluation
metrics.
Supports regression for tabular data.
Args:
location: Location for running the evaluation.
predictions_format: The file format for the batch prediction results. `jsonl`, `csv`, and `bigquery` are the allowed formats, from Vertex Batch Prediction.
predictions_gcs_source: An artifact with its URI pointing toward a GCS directory with prediction or explanation files to be used for this evaluation. For prediction results, the files should be named "prediction.results-*". For explanation results, the files should be named "explanation.results-*".
predictions_bigquery_source: BigQuery table with prediction or explanation data to be used for this evaluation. For prediction results, the table column should be named "predicted_*".
ground_truth_format: Required for custom tabular and non tabular data. The file format for the ground truth files. `jsonl`, `csv`, and `bigquery` are the allowed formats.
ground_truth_gcs_source: Required for custom tabular and non tabular data. The GCS URIs representing where the ground truth is located. Used to provide ground truth for each prediction instance when they are not part of the batch prediction jobs prediction instance.
ground_truth_bigquery_source: Required for custom tabular. The BigQuery table URI representing where the ground truth is located. Used to provide ground truth for each prediction instance when they are not part of the batch prediction jobs prediction instance.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
model: The Vertex model used for evaluation. Must be located in the same region as the location argument. It is used to set the default configurations for AutoML and custom-trained models.
prediction_score_column: The column name of the field containing batch prediction scores. Formatted to be able to find nested columns, delimited by `.`.
    dataflow_service_account: Service account to run the Dataflow job. If not set, Dataflow will use the default worker service account. For more details, see https://cloud.google.com/dataflow/docs/concepts/security-and-permissions#default_worker_service_account
dataflow_disk_size_gb: The disk size (in GB) of the machine executing the evaluation run.
dataflow_machine_type: The machine type executing the evaluation run.
dataflow_workers_num: The number of workers executing the evaluation run.
dataflow_max_workers_num: The max number of workers executing the evaluation run.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. More details: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
force_runner_mode: Flag to choose Beam runner. Valid options are `DirectRunner` and `Dataflow`.
project: Project to run evaluation container. Defaults to the project in which the PipelineJob is run.
Returns:
evaluation_metrics: `google.RegressionMetrics` representing the regression evaluation metrics in GCS.
gcp_resources: Serialized gcp_resources proto tracking the Dataflow job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return dsl.ContainerSpec(
image=version.EVAL_IMAGE_TAG,
command=[
'python3',
'/main.py',
],
args=[
'--setup_file',
'/setup.py',
'--json_mode',
'true',
'--project_id',
project,
'--location',
location,
'--problem_type',
'regression',
'--target_field_name',
dsl.ConcatPlaceholder(['instance.', target_field_name]),
'--batch_prediction_format',
predictions_format,
dsl.IfPresentPlaceholder(
input_name='predictions_gcs_source',
then=[
'--batch_prediction_gcs_source',
predictions_gcs_source.uri,
],
),
dsl.IfPresentPlaceholder(
input_name='predictions_bigquery_source',
then=[
'--batch_prediction_bigquery_source',
dsl.ConcatPlaceholder([
'bq://',
predictions_bigquery_source.metadata['projectId'],
'.',
predictions_bigquery_source.metadata['datasetId'],
'.',
predictions_bigquery_source.metadata['tableId'],
]),
],
),
dsl.IfPresentPlaceholder(
input_name='model',
then=[
'--model_name',
model.metadata['resourceName'],
],
),
'--ground_truth_format',
ground_truth_format,
'--ground_truth_gcs_source',
ground_truth_gcs_source,
'--ground_truth_bigquery_source',
ground_truth_bigquery_source,
'--root_dir',
f'{dsl.PIPELINE_ROOT_PLACEHOLDER}/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
'--prediction_score_column',
prediction_score_column,
'--dataflow_job_prefix',
f'evaluation-regression-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}-{dsl.PIPELINE_TASK_ID_PLACEHOLDER}',
'--dataflow_service_account',
dataflow_service_account,
'--dataflow_disk_size',
dataflow_disk_size_gb,
'--dataflow_machine_type',
dataflow_machine_type,
'--dataflow_workers_num',
dataflow_workers_num,
'--dataflow_max_workers_num',
dataflow_max_workers_num,
'--dataflow_subnetwork',
dataflow_subnetwork,
'--dataflow_use_public_ips',
dataflow_use_public_ips,
'--kms_key_name',
encryption_spec_key_name,
'--force_runner_mode',
force_runner_mode,
'--output_metrics_gcs_path',
evaluation_metrics.path,
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
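# --- Illustrative usage sketch added by the editor; not part of the original
# component. It shows one plausible BigQuery-based wiring: batch prediction writes
# to BigQuery and the resulting `bigquery_output_table` artifact is passed to
# `model_evaluation_regression`. Project, BigQuery URIs, model resource name, and
# target field are placeholder assumptions.
from google_cloud_pipeline_components.types.artifact_types import VertexModel  # example-only import
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp  # example-only import
@dsl.pipeline(name='regression-evaluation-bigquery-example')
def regression_evaluation_bigquery_example(
    project: str = 'my-project',  # assumption
    location: str = 'us-central1',
    model_name: str = 'projects/my-project/locations/us-central1/models/456',  # assumption
):
  get_model = dsl.importer(
      artifact_uri=f'https://{location}-aiplatform.googleapis.com/v1/{model_name}',
      artifact_class=VertexModel,
      metadata={'resourceName': model_name},
  )
  batch_predict = ModelBatchPredictOp(
      project=project,
      location=location,
      model=get_model.outputs['artifact'],
      job_display_name='regression-evaluation-batch-predict',
      bigquery_source_input_uri='bq://my-project.my_dataset.instances',  # assumption
      instances_format='bigquery',
      predictions_format='bigquery',
      bigquery_destination_output_uri='bq://my-project',  # assumption
  )
  # The component reads projectId/datasetId/tableId from the BQTable artifact's
  # metadata (see the ConcatPlaceholder above) to locate the predictions table.
  model_evaluation_regression(
      project=project,
      location=location,
      target_field_name='price',  # assumption
      predictions_format='bigquery',
      predictions_bigquery_source=batch_predict.outputs['bigquery_output_table'],
      model=get_model.outputs['artifact'],
  )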
| 849 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/error_analysis_pipeline.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model import GetVertexModelOp
from google_cloud_pipeline_components._implementation.model_evaluation import ErrorAnalysisAnnotationOp
from google_cloud_pipeline_components._implementation.model_evaluation import EvaluatedAnnotationOp
from google_cloud_pipeline_components._implementation.model_evaluation import EvaluationDatasetPreprocessorOp as DatasetPreprocessorOp
from google_cloud_pipeline_components._implementation.model_evaluation import FeatureExtractorOp
from google_cloud_pipeline_components._implementation.model_evaluation import ModelImportEvaluatedAnnotationOp
from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
from google_cloud_pipeline_components.v1.dataset import GetVertexDatasetOp
from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
from kfp import dsl
@dsl.pipeline(name='automl-vision-error-analysis-pipeline')
def vision_model_error_analysis_pipeline( # pylint: disable=dangerous-default-value
location: str,
model_name: str,
batch_predict_gcs_destination_output_uri: str,
test_dataset_resource_name: str = '',
test_dataset_annotation_set_name: str = '',
training_dataset_resource_name: str = '',
training_dataset_annotation_set_name: str = '',
test_dataset_storage_source_uris: List[str] = [],
training_dataset_storage_source_uris: List[str] = [],
batch_predict_instances_format: str = 'jsonl',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_machine_type: str = 'n1-standard-32',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
dataflow_machine_type: str = 'n1-standard-8',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-vision-error-analysis-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""The evaluation vision error analysis pipeline.
  This pipeline helps you continuously discover dataset example errors, using
  nearest neighbor distances and outlier flags, and provides actionable steps to
  improve model performance. It uses GCP services including Dataflow and Batch
  Prediction.
Args:
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction, in the format of `projects/{project}/locations/{location}/models/{model}` or `projects/{project}/locations/{location}/models/{model}@{model_version_id or model_version_alias}`
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
test_dataset_resource_name: A Vertex dataset resource name of the test dataset. If `test_dataset_storage_source_uris` is also provided, this argument will override the GCS source.
    test_dataset_annotation_set_name: A string of the annotation_set resource name containing the ground truth of the test dataset used for evaluation.
training_dataset_resource_name: A Vertex dataset resource name of the training dataset. If `training_dataset_storage_source_uris` is also provided, this argument will override the GCS source.
    training_dataset_annotation_set_name: A string of the annotation_set resource name containing the ground truth of the training dataset used for feature extraction.
    test_dataset_storage_source_uris: Google Cloud Storage URI(-s) to unmanaged test datasets. `jsonl` is currently the only allowed format. If `test_dataset_resource_name` is also provided, this field will be overridden by the provided Vertex Dataset.
    training_dataset_storage_source_uris: Google Cloud Storage URI(-s) to unmanaged training datasets. `jsonl` is currently the only allowed format. If `training_dataset_resource_name` is also provided, this field will be overridden by the provided Vertex Dataset.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: The disk size (in GB) of the machine executing the evaluation run.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
    force_runner_mode: Forces a specific Beam runner mode. Valid options are `Dataflow` and `DirectRunner`.
project: The GCP project that runs the pipeline components. Defaults to the project in which the PipelineJob is run.
"""
# fmt: on
with dsl.Condition(
(
test_dataset_resource_name != ''
and training_dataset_resource_name != ''
and test_dataset_annotation_set_name != ''
and training_dataset_annotation_set_name != ''
),
name='VertexDataset',
):
get_test_dataset_task = GetVertexDatasetOp(
dataset_resource_name=test_dataset_resource_name
)
get_training_dataset_task = GetVertexDatasetOp(
dataset_resource_name=training_dataset_resource_name
)
dataset_preprocessor_task = DatasetPreprocessorOp(
project=project,
location=location,
test_dataset=get_test_dataset_task.outputs['dataset'],
test_dataset_annotation_set_name=test_dataset_annotation_set_name,
training_dataset=get_training_dataset_task.outputs['dataset'],
training_dataset_annotation_set_name=training_dataset_annotation_set_name,
)
get_model_task = GetVertexModelOp(model_name=model_name)
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name=(
f'{evaluation_display_name}-{dsl.PIPELINE_JOB_ID_PLACEHOLDER}'
),
gcs_source_uris=dataset_preprocessor_task.outputs[
'batch_prediction_storage_source'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
eval_task = ModelEvaluationClassificationOp(
project=project,
location=location,
target_field_name='ground_truth',
ground_truth_format='jsonl',
ground_truth_gcs_source=dataset_preprocessor_task.outputs[
'model_evaluation_storage_source'
],
predictions_format='jsonl',
predictions_gcs_source=batch_predict_task.outputs[
'gcs_output_directory'
],
model=get_model_task.outputs['model'],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
prediction_score_column='',
prediction_label_column='',
)
evaluated_annotation_task = EvaluatedAnnotationOp(
project=project,
location=location,
predictions_storage_source=batch_predict_task.outputs[
'gcs_output_directory'
],
ground_truth_storage_source=dataset_preprocessor_task.outputs[
'test_data_items_storage_source'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
)
feature_extractor_task = FeatureExtractorOp(
project=project,
location=location,
root_dir=batch_predict_gcs_destination_output_uri,
test_dataset=get_test_dataset_task.outputs['dataset'],
training_dataset=get_training_dataset_task.outputs['dataset'],
preprocessed_test_dataset_storage_source=dataset_preprocessor_task.outputs[
'test_data_items_storage_source'
],
preprocessed_training_dataset_storage_source=dataset_preprocessor_task.outputs[
'training_data_items_storage_source'
],
feature_extractor_machine_type=batch_predict_machine_type,
encryption_spec_key_name=encryption_spec_key_name,
)
error_analysis_task = ErrorAnalysisAnnotationOp(
project=project,
location=location,
root_dir=batch_predict_gcs_destination_output_uri,
embeddings_dir=feature_extractor_task.outputs['embeddings_dir'],
)
model_evaluation_importer_task = ModelImportEvaluationOp(
classification_metrics=eval_task.outputs['evaluation_metrics'],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_paths=dataset_preprocessor_task.outputs[
'batch_prediction_storage_source'
],
display_name=evaluation_display_name,
)
ModelImportEvaluatedAnnotationOp(
model=get_model_task.outputs['model'],
evaluated_annotation_output_uri=evaluated_annotation_task.outputs[
'evaluated_annotation_output_uri'
],
evaluation_importer_gcp_resources=model_evaluation_importer_task.outputs[
'gcp_resources'
],
error_analysis_output_uri=error_analysis_task.outputs[
'error_analysis_output_uri'
],
)
with dsl.Condition(
((
test_dataset_resource_name == ''
and training_dataset_resource_name == ''
and test_dataset_annotation_set_name == ''
and training_dataset_annotation_set_name == ''
)),
name='CustomDataset',
):
dataset_preprocessor_task = DatasetPreprocessorOp(
project=project,
location=location,
test_dataset_storage_source_uris=test_dataset_storage_source_uris,
training_dataset_storage_source_uris=training_dataset_storage_source_uris,
)
get_model_task = GetVertexModelOp(model_name=model_name)
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name='sdk-batch-predict-evaluation',
gcs_source_uris=dataset_preprocessor_task.outputs[
'batch_prediction_storage_source'
],
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
eval_task = ModelEvaluationClassificationOp(
project=project,
location=location,
target_field_name='ground_truth',
ground_truth_format='jsonl',
ground_truth_gcs_source=dataset_preprocessor_task.outputs[
'model_evaluation_storage_source'
],
predictions_format='jsonl',
predictions_gcs_source=batch_predict_task.outputs[
'gcs_output_directory'
],
model=get_model_task.outputs['model'],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
prediction_score_column='',
prediction_label_column='',
)
evaluated_annotation_task = EvaluatedAnnotationOp(
project=project,
location=location,
predictions_storage_source=batch_predict_task.outputs[
'gcs_output_directory'
],
ground_truth_storage_source=dataset_preprocessor_task.outputs[
'test_data_items_storage_source'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
)
feature_extractor_task = FeatureExtractorOp(
project=project,
location=location,
root_dir=batch_predict_gcs_destination_output_uri,
preprocessed_test_dataset_storage_source=dataset_preprocessor_task.outputs[
'test_data_items_storage_source'
],
preprocessed_training_dataset_storage_source=dataset_preprocessor_task.outputs[
'training_data_items_storage_source'
],
feature_extractor_machine_type=batch_predict_machine_type,
encryption_spec_key_name=encryption_spec_key_name,
)
error_analysis_task = ErrorAnalysisAnnotationOp(
project=project,
location=location,
root_dir=batch_predict_gcs_destination_output_uri,
embeddings_dir=feature_extractor_task.outputs['embeddings_dir'],
)
model_evaluation_importer_task = ModelImportEvaluationOp(
classification_metrics=eval_task.outputs['evaluation_metrics'],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_paths=dataset_preprocessor_task.outputs[
'batch_prediction_storage_source'
],
display_name=evaluation_display_name,
)
ModelImportEvaluatedAnnotationOp(
model=get_model_task.outputs['model'],
evaluated_annotation_output_uri=evaluated_annotation_task.outputs[
'evaluated_annotation_output_uri'
],
evaluation_importer_gcp_resources=model_evaluation_importer_task.outputs[
'gcp_resources'
],
error_analysis_output_uri=error_analysis_task.outputs[
'error_analysis_output_uri'
],
)
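# --- Illustrative usage sketch added by the editor; not part of the original
# pipeline module. It shows one plausible way to compile this pipeline with the
# KFP SDK and submit it to Vertex AI Pipelines via google-cloud-aiplatform. All
# project, bucket, model, and dataset values are placeholder assumptions.
if __name__ == '__main__':
  from google.cloud import aiplatform  # example-only import
  from kfp import compiler  # example-only import
  compiler.Compiler().compile(
      pipeline_func=vision_model_error_analysis_pipeline,
      package_path='vision_model_error_analysis_pipeline.json',
  )
  aiplatform.init(project='my-project', location='us-central1')  # assumptions
  job = aiplatform.PipelineJob(
      display_name='vision-error-analysis-example',
      template_path='vision_model_error_analysis_pipeline.json',
      pipeline_root='gs://my-bucket/pipeline_root',  # assumption
      parameter_values={
          # Using the CustomDataset branch: unmanaged JSONL datasets on GCS.
          'location': 'us-central1',
          'model_name': 'projects/my-project/locations/us-central1/models/789',  # assumption
          'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/bp_output',  # assumption
          'test_dataset_storage_source_uris': ['gs://my-bucket/test.jsonl'],  # assumption
          'training_dataset_storage_source_uris': ['gs://my-bucket/train.jsonl'],  # assumption
      },
  )
  job.submit()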
| 850 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/evaluation_automl_tabular_feature_attribution_pipeline.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, NamedTuple
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.model import GetVertexModelOp
from google_cloud_pipeline_components._implementation.model_evaluation import FeatureAttributionGraphComponentOp
from google_cloud_pipeline_components.preview.model_evaluation.model_evaluation_import_component import model_evaluation_import as ModelImportEvaluationOp
from google_cloud_pipeline_components.types.artifact_types import ClassificationMetrics
from google_cloud_pipeline_components.types.artifact_types import RegressionMetrics
from google_cloud_pipeline_components.v1.batch_predict_job import ModelBatchPredictOp
from google_cloud_pipeline_components.v1.model_evaluation.classification_component import model_evaluation_classification as ModelEvaluationClassificationOp
from google_cloud_pipeline_components.v1.model_evaluation.regression_component import model_evaluation_regression as ModelEvaluationRegressionOp
import kfp
@kfp.dsl.pipeline(
name='evaluation-automl-tabular-feature-attribution-classification-pipeline'
)
def evaluation_automl_tabular_feature_attribution_classification_pipeline( # pylint: disable=dangerous-default-value
location: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_explanation_metadata: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_parameters: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_data_sample_size: int = 10000,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
slicing_specs: List[Any] = [], # pylint: disable=g-bare-generic
evaluation_display_name: str = 'evaluation-automl-tabular-feature-attribution-pipeline-{{$.pipeline_job_uuid}}',
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
force_runner_mode: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
) -> NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The evaluation AutoML tabular pipeline with feature attribution for classification models.
This pipeline guarantees support for AutoML Tabular models that contain a valid explanation_spec. This pipeline does not include the target_field_data_remover component, which is needed for many tabular custom models.
Args:
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances to run batch prediction on. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_explanation_metadata: Explanation metadata configuration for this BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_metadata`. All fields of `explanation_metadata` are optional in the request. If a field of the `explanation_metadata` object is not populated, the corresponding field of the `Model.explanation_metadata` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.
batch_predict_explanation_parameters: Parameters to configure explaining for Model's predictions. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_parameters`. All fields of `explanation_parameters` are optional in the request. If a field of the `explanation_parameters` object is not populated, the corresponding field of the `Model.explanation_parameters` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.
batch_predict_explanation_data_sample_size: Desired size to downsample the input dataset that will then be used for batch explanation.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See sample code in https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component For more details on configuring slices, see https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name, when empty the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
    force_runner_mode: Forces a specific Beam runner mode. Valid options are `Dataflow` and `DirectRunner`.
project: The GCP project that runs the pipeline components. Defaults to the project in which the PipelineJob is run.
Returns:
    evaluation_metrics: A `google.ClassificationMetrics` artifact with the computed classification metrics.
    evaluation_resource_name: The resource name of the imported model evaluation.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=ClassificationMetrics,
evaluation_resource_name=str,
)
get_model_task = GetVertexModelOp(model_name=model_name)
# Run Batch Prediction.
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name='model-registry-batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
gcs_source_uris=batch_predict_gcs_source_uris,
bigquery_source_input_uri=batch_predict_bigquery_source_uri,
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
# Run feature attribution steps.
feature_attribution_graph = FeatureAttributionGraphComponentOp(
project=project,
location=location,
prediction_type='classification',
vertex_model=get_model_task.outputs['model'],
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_explanation_metadata=batch_predict_explanation_metadata,
batch_predict_explanation_parameters=batch_predict_explanation_parameters,
batch_predict_explanation_data_sample_size=batch_predict_explanation_data_sample_size,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
)
# Run evaluation for a classification model.
eval_task = ModelEvaluationClassificationOp(
project=project,
location=location,
target_field_name=target_field_name,
predictions_format=batch_predict_predictions_format,
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
predictions_bigquery_source=batch_predict_task.outputs[
'bigquery_output_table'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
model=get_model_task.outputs['model'],
slicing_specs=slicing_specs,
)
# Import the evaluation result to Vertex AI.
import_evaluation_task = ModelImportEvaluationOp(
classification_metrics=eval_task.outputs['evaluation_metrics'],
feature_attributions=feature_attribution_graph.outputs[
'feature_attributions'
],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_path=batch_predict_bigquery_source_uri,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
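# --- Illustrative usage sketch added by the editor; not part of the original
# module. Because the classification pipeline above returns a NamedTuple, it can
# be invoked as a sub-pipeline inside a larger KFP pipeline (assuming a KFP SDK
# version that supports using pipelines as components) and its outputs consumed
# downstream. The toy reporting component and all literal values are assumptions
# for illustration only.
@kfp.dsl.component
def report_evaluation_example(resource_name: str):
  """Toy downstream step that just logs the imported evaluation resource name."""
  print(f'Imported model evaluation: {resource_name}')
@kfp.dsl.pipeline(name='feature-attribution-eval-as-subpipeline-example')
def feature_attribution_eval_as_subpipeline_example(
    project: str = 'my-project',  # assumption
    location: str = 'us-central1',
    model_name: str = 'projects/my-project/locations/us-central1/models/123',  # assumption
):
  eval_subpipeline = (
      evaluation_automl_tabular_feature_attribution_classification_pipeline(
          project=project,
          location=location,
          model_name=model_name,
          target_field_name='label',  # assumption
          batch_predict_instances_format='csv',
          batch_predict_gcs_destination_output_uri='gs://my-bucket/bp_output',  # assumption
          batch_predict_gcs_source_uris=['gs://my-bucket/test_split.csv'],  # assumption
      )
  )
  report_evaluation_example(
      resource_name=eval_subpipeline.outputs['evaluation_resource_name']
  )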
@kfp.dsl.pipeline(
name='evaluation-automl-tabular-feature-attribution-regression-pipeline'
)
def evaluation_automl_tabular_feature_attribution_regression_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_explanation_metadata: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_parameters: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_data_sample_size: int = 10000,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-tabular-feature-attribution-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
) -> NamedTuple(
'outputs',
evaluation_metrics=RegressionMetrics,
evaluation_resource_name=str,
):
# fmt: off
"""The evaluation AutoML tabular pipeline with feature attribution for regression models.
This pipeline guarantees support for AutoML Tabular models that contain a valid explanation_spec. This pipeline does not include the target_field_data_remover component, which is needed for many tabular custom models.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances to run batch prediction on. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>` where is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has `google.rpc.Status` represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_explanation_metadata: Explanation metadata configuration for this BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_metadata`. All fields of `explanation_metadata` are optional in the request. If a field of the `explanation_metadata` object is not populated, the corresponding field of the `Model.explanation_metadata` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.
batch_predict_explanation_parameters: Parameters to configure explaining for Model's predictions. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_parameters`. All fields of `explanation_parameters` are optional in the request. If a field of the `explanation_parameters` object is not populated, the corresponding field of the `Model.explanation_parameters` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.
batch_predict_explanation_data_sample_size: Desired size to downsample the input dataset that will then be used for batch explanation.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name; when empty, the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Forces the use of a specific runner mode. Valid options are `Dataflow` and `DirectRunner`.
Returns:
evaluation_metrics: A google.RegressionMetrics artifact with the computed evaluation metrics.
evaluation_resource_name: The resource name of the imported Vertex AI model evaluation.
"""
# fmt: on
outputs = NamedTuple(
'outputs',
evaluation_metrics=RegressionMetrics,
evaluation_resource_name=str,
)
get_model_task = GetVertexModelOp(model_name=model_name)
# Run Batch Prediction.
batch_predict_task = ModelBatchPredictOp(
project=project,
location=location,
model=get_model_task.outputs['model'],
job_display_name='model-registry-batch-predict-evaluation-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
gcs_source_uris=batch_predict_gcs_source_uris,
bigquery_source_input_uri=batch_predict_bigquery_source_uri,
instances_format=batch_predict_instances_format,
predictions_format=batch_predict_predictions_format,
gcs_destination_output_uri_prefix=batch_predict_gcs_destination_output_uri,
bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
machine_type=batch_predict_machine_type,
starting_replica_count=batch_predict_starting_replica_count,
max_replica_count=batch_predict_max_replica_count,
encryption_spec_key_name=encryption_spec_key_name,
accelerator_type=batch_predict_accelerator_type,
accelerator_count=batch_predict_accelerator_count,
)
# Run feature attribution steps.
feature_attribution_graph = FeatureAttributionGraphComponentOp(
project=project,
location=location,
prediction_type='regression',
vertex_model=get_model_task.outputs['model'],
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_explanation_metadata=batch_predict_explanation_metadata,
batch_predict_explanation_parameters=batch_predict_explanation_parameters,
batch_predict_explanation_data_sample_size=batch_predict_explanation_data_sample_size,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
)
# Run evaluation for a regression model.
eval_task = ModelEvaluationRegressionOp(
project=project,
location=location,
target_field_name=target_field_name,
predictions_format=batch_predict_predictions_format,
predictions_gcs_source=batch_predict_task.outputs['gcs_output_directory'],
predictions_bigquery_source=batch_predict_task.outputs[
'bigquery_output_table'
],
dataflow_machine_type=dataflow_machine_type,
dataflow_max_workers_num=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
force_runner_mode=force_runner_mode,
model=get_model_task.outputs['model'],
)
# Import the evaluation result to Vertex AI.
import_evaluation_task = ModelImportEvaluationOp(
regression_metrics=eval_task.outputs['evaluation_metrics'],
feature_attributions=feature_attribution_graph.outputs[
'feature_attributions'
],
model=get_model_task.outputs['model'],
dataset_type=batch_predict_instances_format,
dataset_path=batch_predict_bigquery_source_uri,
dataset_paths=batch_predict_gcs_source_uris,
display_name=evaluation_display_name,
)
return outputs(
evaluation_metrics=eval_task.outputs['evaluation_metrics'],
evaluation_resource_name=import_evaluation_task.outputs[
'evaluation_resource_name'
],
)
@kfp.dsl.pipeline(name='evaluation-automl-tabular-feature-attribution-pipeline')
def evaluation_automl_tabular_feature_attribution_pipeline( # pylint: disable=dangerous-default-value
project: str,
location: str,
prediction_type: str,
model_name: str,
target_field_name: str,
batch_predict_instances_format: str,
batch_predict_gcs_destination_output_uri: str,
batch_predict_gcs_source_uris: List[str] = [], # pylint: disable=g-bare-generic
batch_predict_bigquery_source_uri: str = '',
batch_predict_predictions_format: str = 'jsonl',
batch_predict_bigquery_destination_output_uri: str = '',
batch_predict_machine_type: str = 'n1-standard-16',
batch_predict_starting_replica_count: int = 5,
batch_predict_max_replica_count: int = 10,
batch_predict_explanation_metadata: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_parameters: Dict[str, Any] = {}, # pylint: disable=g-bare-generic
batch_predict_explanation_data_sample_size: int = 10000,
batch_predict_accelerator_type: str = '',
batch_predict_accelerator_count: int = 0,
slicing_specs: List[Any] = [], # pylint: disable=g-bare-generic
dataflow_machine_type: str = 'n1-standard-4',
dataflow_max_num_workers: int = 5,
dataflow_disk_size_gb: int = 50,
dataflow_service_account: str = '',
dataflow_subnetwork: str = '',
dataflow_use_public_ips: bool = True,
encryption_spec_key_name: str = '',
evaluation_display_name: str = 'evaluation-automl-tabular-feature-attribution-pipeline-{{$.pipeline_job_uuid}}',
force_runner_mode: str = '',
):
# fmt: off
"""The evaluation AutoML tabular pipeline with feature attribution.
This pipeline guarantees support for AutoML Tabular classification and
regression models that contain a valid explanation_spec. This pipeline does
not include the target_field_data_remover component, which is needed for many
tabular custom models.
Args:
project: The GCP project that runs the pipeline components.
location: The GCP region that runs the pipeline components.
prediction_type: The type of prediction the model is to produce. "classification" or "regression".
model_name: The Vertex model resource name to be imported and used for batch prediction.
target_field_name: The target field's name. Formatted to be able to find nested columns, delimited by `.`. Prefixed with 'instance.' on the component for Vertex Batch Prediction.
batch_predict_instances_format: The format in which instances are given, must be one of the Model's supportedInputStorageFormats. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_gcs_destination_output_uri: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it, files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created, where `<extension>` depends on the chosen `predictions_format` and N (which may be as low as 0001) depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined, then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then additional files `errors_0001.<extension>`, `errors_0002.<extension>`, ..., `errors_N.<extension>` are created (N depends on the total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field whose value is a `google.rpc.Status` containing only the `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_gcs_source_uris: Google Cloud Storage URI(-s) to your instances to run batch prediction on. May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_bigquery_source_uri: Google BigQuery URI to your instances to run batch prediction on. May contain wildcards. For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
batch_predict_predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with the name `prediction_<model-display-name>_<job-create-time>`, where `<model-display-name>` is made BigQuery-dataset-name compatible (for example, most special characters become underscores) and the timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format (based on ISO-8601). In the dataset two tables will be created, `predictions` and `errors`. If the Model has both `instance` and `prediction` schemata defined, then the tables have columns as follows: the `predictions` table contains instances for which the prediction succeeded, and has columns as per a concatenation of the Model's instance and prediction schemata; the `errors` table contains rows for which the prediction failed, and has instance columns, as per the instance schema, followed by a single "errors" column, whose values are `google.rpc.Status` represented as a STRUCT containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
batch_predict_machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
batch_predict_max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
batch_predict_explanation_metadata: Explanation metadata configuration for this BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_metadata`. All fields of `explanation_metadata` are optional in the request. If a field of the `explanation_metadata` object is not populated, the corresponding field of the `Model.explanation_metadata` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.
batch_predict_explanation_parameters: Parameters to configure explaining for Model's predictions. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_parameters`. All fields of `explanation_parameters` are optional in the request. If a field of the `explanation_parameters` object is not populated, the corresponding field of the `Model.explanation_parameters` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.
batch_predict_explanation_data_sample_size: Desired size to downsample the input dataset that will then be used for batch explanation.
batch_predict_accelerator_type: The type of accelerator(s) that may be attached to the machine as per `batch_predict_accelerator_count`. Only used if `batch_predict_machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
batch_predict_accelerator_count: The number of accelerators to attach to the `batch_predict_machine_type`. Only used if `batch_predict_machine_type` is set.
slicing_specs: List of `google.cloud.aiplatform_v1.types.ModelEvaluationSlice.SlicingSpec`. When provided, compute metrics for each defined slice. See sample code in https://cloud.google.com/vertex-ai/docs/pipelines/model-evaluation-component For more details on configuring slices, see https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.ModelEvaluationSlice.
dataflow_machine_type: The Dataflow machine type for evaluation components.
dataflow_max_num_workers: The max number of Dataflow workers for evaluation components.
dataflow_disk_size_gb: Dataflow worker's disk size in GB for evaluation components.
dataflow_service_account: Custom service account to run Dataflow jobs.
dataflow_subnetwork: Dataflow's fully qualified subnetwork name; when empty, the default subnetwork will be used. Example: https://cloud.google.com/dataflow/docs/guides/specifying-networks#example_network_and_subnetwork_specifications
dataflow_use_public_ips: Specifies whether Dataflow workers use public IP addresses.
encryption_spec_key_name: Customer-managed encryption key options. If set, resources created by this pipeline will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
evaluation_display_name: The display name of the uploaded evaluation resource to the Vertex AI model.
force_runner_mode: Forces the use of a specific runner mode. Valid options are `Dataflow` and `DirectRunner`.
"""
# fmt: on
with kfp.dsl.Condition(
prediction_type == 'classification', name='classification'
):
evaluation_automl_tabular_feature_attribution_classification_pipeline(
project=project,
location=location,
model_name=model_name,
target_field_name=target_field_name,
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_explanation_metadata=batch_predict_explanation_metadata,
batch_predict_explanation_parameters=batch_predict_explanation_parameters,
batch_predict_explanation_data_sample_size=batch_predict_explanation_data_sample_size,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
slicing_specs=slicing_specs,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
evaluation_display_name=evaluation_display_name,
force_runner_mode=force_runner_mode,
)
with kfp.dsl.Condition(prediction_type == 'regression', name='regression'):
evaluation_automl_tabular_feature_attribution_regression_pipeline(
project=project,
location=location,
model_name=model_name,
target_field_name=target_field_name,
batch_predict_instances_format=batch_predict_instances_format,
batch_predict_gcs_destination_output_uri=batch_predict_gcs_destination_output_uri,
batch_predict_gcs_source_uris=batch_predict_gcs_source_uris,
batch_predict_bigquery_source_uri=batch_predict_bigquery_source_uri,
batch_predict_predictions_format=batch_predict_predictions_format,
batch_predict_bigquery_destination_output_uri=batch_predict_bigquery_destination_output_uri,
batch_predict_machine_type=batch_predict_machine_type,
batch_predict_starting_replica_count=batch_predict_starting_replica_count,
batch_predict_max_replica_count=batch_predict_max_replica_count,
batch_predict_explanation_metadata=batch_predict_explanation_metadata,
batch_predict_explanation_parameters=batch_predict_explanation_parameters,
batch_predict_explanation_data_sample_size=batch_predict_explanation_data_sample_size,
batch_predict_accelerator_type=batch_predict_accelerator_type,
batch_predict_accelerator_count=batch_predict_accelerator_count,
dataflow_machine_type=dataflow_machine_type,
dataflow_max_num_workers=dataflow_max_num_workers,
dataflow_disk_size_gb=dataflow_disk_size_gb,
dataflow_service_account=dataflow_service_account,
dataflow_subnetwork=dataflow_subnetwork,
dataflow_use_public_ips=dataflow_use_public_ips,
encryption_spec_key_name=encryption_spec_key_name,
evaluation_display_name=evaluation_display_name,
force_runner_mode=force_runner_mode,
)
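# A minimal, hypothetical sketch of compiling and submitting this pipeline.
# The project, region, bucket, model, and parameter values below are
# illustrative placeholders, not values taken from this module.
#
#   from kfp import compiler
#   from google.cloud import aiplatform
#
#   compiler.Compiler().compile(
#       pipeline_func=evaluation_automl_tabular_feature_attribution_pipeline,
#       package_path='evaluation_pipeline.json',
#   )
#   aiplatform.init(project='my-project', location='us-central1')
#   aiplatform.PipelineJob(
#       display_name='automl-tabular-evaluation',
#       template_path='evaluation_pipeline.json',
#       parameter_values={
#           'project': 'my-project',
#           'location': 'us-central1',
#           'prediction_type': 'regression',
#           'model_name': 'projects/my-project/locations/us-central1/models/1234567890',
#           'target_field_name': 'label',
#           'batch_predict_instances_format': 'csv',
#           'batch_predict_gcs_source_uris': ['gs://my-bucket/eval_data.csv'],
#           'batch_predict_gcs_destination_output_uri': 'gs://my-bucket/eval_output',
#       },
#   ).submit()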
| 851 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/model_based_llm_evaluation/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model based LLM evaluation GA components."""
from google_cloud_pipeline_components.v1.model_evaluation.model_based_llm_evaluation.autosxs.autosxs_pipeline import autosxs_pipeline
__all__ = [
'autosxs_pipeline',
]
| 852 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/model_based_llm_evaluation | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/model_based_llm_evaluation/autosxs/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 853 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/model_based_llm_evaluation | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model_evaluation/model_based_llm_evaluation/autosxs/autosxs_pipeline.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization AI Inference and AutoSxS pipeline function."""
from typing import Any, Dict, List, NamedTuple
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components._implementation.llm import batch_prediction_pairwise
from google_cloud_pipeline_components._implementation.llm import model_evaluation_text_generation_pairwise
from google_cloud_pipeline_components._implementation.llm import online_evaluation_pairwise
from kfp import dsl
PipelineOutput = NamedTuple(
'Outputs',
model_a_evaluation_resource_name=str,
model_b_evaluation_resource_name=str,
evaluation_count=int,
evaluation_dataset_path=str,
)
# pylint: disable=dangerous-default-value,g-bare-generic,unused-argument
@dsl.pipeline(
name='autosxs-template',
description='Determines the SxS winrate between two models.',
)
def autosxs_pipeline(
evaluation_dataset: str,
task: str,
id_columns: List[str],
autorater_prompt_parameters: Dict[str, Dict[str, str]],
model_a: str = '',
model_b: str = '',
model_a_prompt_parameters: Dict[str, Dict[str, str]] = {},
model_b_prompt_parameters: Dict[str, Dict[str, str]] = {},
response_column_a: str = '',
response_column_b: str = '',
model_a_parameters: Dict[str, str] = {},
model_b_parameters: Dict[str, str] = {},
human_preference_column: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
location: str = _placeholders.LOCATION_PLACEHOLDER,
judgments_format: str = 'jsonl',
bigquery_destination_prefix: str = '',
experimental_args: Dict[str, Any] = {},
encryption_spec_key_name: str = '',
) -> PipelineOutput:
# fmt: off
"""Evaluates two models side-by-side using an arbiter model.
Args:
evaluation_dataset: A BigQuery table or comma-separated list of GCS paths to a JSONL dataset containing evaluation examples.
task: Evaluation task in the form `{task}@{version}`. task can be one of `[summarization, question_answering]`. Version is an integer with 3 digits or "latest". Ex: `summarization@001` or `question_answering@latest`.
id_columns: The columns which distinguish unique evaluation examples.
autorater_prompt_parameters: Map of autorater prompt parameters to columns or templates. The expected parameters are: `inference_instruction` (details on how to perform a task) and `inference_context` (content to reference to perform the task). As an example, `{'inference_context': {'column': 'my_prompt'}}` uses the evaluation dataset's `my_prompt` column for the AutoRater's context.
model_a: A fully-qualified model resource name (`projects/{project}/locations/{location}/models/{model}@{version}`) or publisher model resource name (`publishers/{publisher}/models/{model}`). This parameter is optional if Model A responses are specified.
model_b: A fully-qualified model resource name (`projects/{project}/locations/{location}/models/{model}@{version}`) or publisher model resource name (`publishers/{publisher}/models/{model}`). This parameter is optional if Model B responses are specified.
model_a_prompt_parameters: Map of Model A prompt template parameters to columns or templates. This parameter is optional if Model A predictions are predefined. Example - `{'prompt': {'column': 'my_prompt'}}` uses the evaluation dataset's `my_prompt` column for the prompt parameter named `prompt`.
model_b_prompt_parameters: Map of Model B prompt template parameters to columns or templates. This parameter is optional if Model B predictions are predefined. Example - `{'prompt': {'column': 'my_prompt'}}` uses the evaluation dataset's `my_prompt` column for the prompt parameter named `prompt`.
response_column_a: Either the name of a column in the evaluation dataset containing predefined predictions, or the name of the column in the Model A output containing predictions. If no value is provided, the pipeline will attempt to infer the correct model output column name.
response_column_b: Either the name of a column in the evaluation dataset containing predefined predictions, or the name of the column in the Model B output containing predictions. If no value is provided, the pipeline will attempt to infer the correct model output column name.
model_a_parameters: The parameters that govern the predictions from model A, such as temperature or maximum output tokens.
model_b_parameters: The parameters that govern the predictions from model B, such as temperature or maximum output tokens.
human_preference_column: The column containing ground truth winners for each example. Providing this parameter adds additional metrics for checking the AutoRater alignment with human preferences.
project: Project used to run custom jobs. This should be the same project used to run the pipeline.
location: Location used to run custom jobs. This should be the same location used to run the pipeline.
judgments_format: The format to write judgments to. Can be either `[json, bigquery]`.
bigquery_destination_prefix: BigQuery table to write judgments to if the specified format is 'bigquery'.
experimental_args: Experimentally released arguments. Subject to change.
encryption_spec_key_name: Customer-managed encryption key options. If this is set, then all resources created by the pipeline will be encrypted with the provided encryption key.
Returns:
model_a_evaluation_resource_name: The path to write the ModelEvaluation for Model A to if Model A is a ModelRegistry Model.
model_b_evaluation_resource_name: The path to write the ModelEvaluation for Model B to if Model B is a ModelRegistry Model.
evaluation_count: The count of how many evaluations were included for this AutoSxS run.
evaluation_dataset_path: The path to the overall evaluation dataset including judgments.
"""
# fmt: on
responses = batch_prediction_pairwise.batch_prediction_pairwise(
display_name='autosxs-{{$.pipeline_job_uuid}}-{{$.pipeline_task_uuid}}',
evaluation_dataset=evaluation_dataset,
id_columns=id_columns,
task=task,
autorater_prompt_parameters=autorater_prompt_parameters,
response_column_a=response_column_a,
response_column_b=response_column_b,
model_a=model_a,
model_b=model_b,
model_a_prompt_parameters=model_a_prompt_parameters,
model_b_prompt_parameters=model_b_prompt_parameters,
model_a_parameters=model_a_parameters,
model_b_parameters=model_b_parameters,
human_preference_column=human_preference_column,
experimental_args=experimental_args,
project=project,
location=location,
encryption_spec_key_name=encryption_spec_key_name,
).set_display_name('AutoSxS Batch Prediction')
winners = online_evaluation_pairwise.online_evaluation_pairwise(
inference_output_uri=responses.outputs[
'preprocessed_evaluation_dataset_uri'
],
id_columns=id_columns,
human_preference_column=human_preference_column,
task=task,
judgments_format=judgments_format,
bigquery_destination_prefix=bigquery_destination_prefix,
experimental_args=experimental_args,
project=project,
location=location,
encryption_spec_key_name=encryption_spec_key_name,
autorater_prompt_parameters=autorater_prompt_parameters,
).set_display_name('AutoSxS Autorater')
metrics = model_evaluation_text_generation_pairwise.model_evaluation_text_generation_pairwise(
judgments_dir=winners.outputs['judgments_uri'],
human_preference_column=human_preference_column,
project=project,
location=location,
encryption_spec_key_name=encryption_spec_key_name,
model_a=model_a,
model_b=model_b,
evaluation_dataset=evaluation_dataset,
evaluation_dataset_metadata=winners.outputs['metadata'],
task=task,
).set_display_name(
'AutoSxS Metrics'
)
return PipelineOutput(
model_a_evaluation_resource_name=metrics.outputs[
'model_a_evaluation_path'
],
model_b_evaluation_resource_name=metrics.outputs[
'model_b_evaluation_path'
],
evaluation_count=metrics.outputs['evaluation_count_path'],
# Needs to be a component output
evaluation_dataset_path=metrics.outputs['evaluation_dataset_path'],
)
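# A minimal, hypothetical sketch of compiling this pipeline and the shape of
# its parameter values. The dataset path, task, and column names below are
# illustrative placeholders only.
#
#   from kfp import compiler
#
#   compiler.Compiler().compile(
#       pipeline_func=autosxs_pipeline,
#       package_path='autosxs_pipeline.json',
#   )
#   # Example parameter values for comparing predefined responses on a
#   # summarization task:
#   # {
#   #     'evaluation_dataset': 'gs://my-bucket/eval_examples.jsonl',
#   #     'task': 'summarization@latest',
#   #     'id_columns': ['example_id'],
#   #     'autorater_prompt_parameters': {
#   #         'inference_context': {'column': 'document'},
#   #         'inference_instruction': {'column': 'instruction'},
#   #     },
#   #     'response_column_a': 'model_a_response',
#   #     'response_column_b': 'model_b_response',
#   # }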
| 854 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/wait_gcp_resources/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import utils
from kfp import dsl
from kfp.dsl import OutputPath
@utils.gcpc_output_name_converter('gcp_resources')
@dsl.container_component
def wait_gcp_resources(
gcp_resources: str,
output__gcp_resources: OutputPath(str),
):
# fmt: off
"""Waits for the completion of one or more GCP resources by polling for
completion statuses.
Currently this component only supports waiting on a [DataflowJob](https://cloud.google.com/config-connector/docs/reference/resource-docs/dataflow/dataflowjob) resource. To use this component, first create a component that outputs a `gcp_resources` proto as JSON, then pass it to this component's `gcp_resources` parameter. See [details](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud/google_cloud_pipeline_components/proto) on how to create a `gcp_resources` proto as a component output.
```
dataflow_python_op = gcpc.v1.dataflow.LaunchPythonOp(
    python_file_path=...
)
dataflow_wait_op = WaitGcpResourcesOp(
    gcp_resources=dataflow_python_op.outputs["gcp_resources"]
)
```
Args:
gcp_resources: Serialized JSON of `gcp_resources` proto, indicating the resource(s) this component should wait on.
Returns:
gcp_resources: The `gcp_resource`, including any relevant error information.
"""
# fmt: on
return dsl.ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.wait_gcp_resources.launcher',
],
args=[
'--type',
'Wait',
'--project',
'',
'--location',
'',
'--payload',
gcp_resources,
'--gcp_resources',
output__gcp_resources,
],
)
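# A minimal, hypothetical sketch of wiring this component into a pipeline.
# `upstream_op` stands in for any component that emits a `gcp_resources`
# output (for example, a Dataflow launcher); it is not defined in this module.
#
#   from kfp import dsl
#
#   @dsl.pipeline(name='wait-on-gcp-resources')
#   def my_pipeline():
#       upstream_op = ...  # a component producing outputs['gcp_resources']
#       wait_op = wait_gcp_resources(
#           gcp_resources=upstream_op.outputs['gcp_resources'],
#       )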
| 855 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/wait_gcp_resources/__init__.py | # Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# fmt: off
"""Wait on the completion of GCP resources spawned from an upstream pipeline component."""
# fmt: on
from google_cloud_pipeline_components.v1.wait_gcp_resources.component import wait_gcp_resources as WaitGcpResourcesOp
__all__ = [
'WaitGcpResourcesOp',
]
| 856 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/custom_job/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components import utils
from kfp import dsl
# keep identical to create_custom_training_job_from_component
@dsl.container_component
def custom_training_job(
display_name: str,
gcp_resources: dsl.OutputPath(str),
location: str = 'us-central1',
worker_pool_specs: List[Dict[str, str]] = [],
timeout: str = '604800s',
restart_job_on_worker_restart: bool = False,
service_account: str = '',
tensorboard: str = '',
enable_web_access: bool = False,
network: str = '',
reserved_ip_ranges: List[str] = [],
base_output_directory: str = '',
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
persistent_resource_id: str = _placeholders.PERSISTENT_RESOURCE_ID_PLACEHOLDER,
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a Vertex AI [custom training job](https://cloud.google.com/vertex-ai/docs/training/create-custom-job) using the [CustomJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.customJobs) API. See [Create custom training jobs ](https://cloud.google.com/vertex-ai/docs/training/create-custom-job) for more information.
Args:
location: Location for creating the custom training job. If not set, default to us-central1.
display_name: The name of the CustomJob.
worker_pool_specs: Serialized json spec of the worker pools including machine type and Docker image. All worker pools except the first one are optional and can be skipped by providing an empty value. See [more information](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#WorkerPoolSpec).
timeout: The maximum job running time. The default is 7 days. A duration in seconds with up to nine fractional digits, terminated by 's', for example: "3.5s".
restart_job_on_worker_restart: Restarts the entire CustomJob if a worker gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job.
service_account: Sets the default service account for the workload run-as account. The [service account](https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account) running the pipeline submitting jobs must have act-as permission on this run-as account. If unspecified, the Vertex AI Custom Code [Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used.
tensorboard: The name of a Vertex AI TensorBoard resource to which this CustomJob will upload TensorBoard logs.
enable_web_access: Whether you want Vertex AI to enable [interactive shell access ](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If `True`, you can access interactive shells at the URIs given by [CustomJob.web_access_uris][].
network: The full name of the Compute Engine network to which the job should be peered. For example, `projects/12345/global/networks/myVPC`. Format is of the form `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is a network name. Private services access must already be configured for the network. If left unspecified, the job is not peered with any network.
reserved_ip_ranges: A list of names for the reserved IP ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided IP ranges. Otherwise, the job will be deployed to any IP ranges under the provided VPC network.
base_output_directory: The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. See [more information ](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/GcsDestination).
labels: The labels with user-defined metadata to organize the CustomJob. See [more information](https://goo.gl/xmQnxf).
encryption_spec_key_name: Customer-managed encryption key options for the CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key.
persistent_resource_id: The ID of the PersistentResource in the same Project and Location in which to run the job. The default value is a placeholder that will be resolved to the PipelineJob [RuntimeConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.pipelineJobs#PipelineJob.RuntimeConfig)'s persistent resource id at runtime. However, if the PipelineJob doesn't set a Persistent Resource as the job-level runtime, the placeholder will be resolved to an empty string and the custom job will be run on demand. If the value is set explicitly, the custom job will run in the specified persistent resource; in this case, note that the network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise the job will be rejected.
project: Project to create the custom training job in. Defaults to the project in which the PipelineJob is run.
Returns:
gcp_resources: Serialized JSON of `gcp_resources` [proto](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud/google_cloud_pipeline_components/proto) which tracks the CustomJob.
"""
# fmt: on
return utils.build_serverless_customjob_container_spec(
project=project,
location=location,
custom_job_payload={
'display_name': display_name,
'job_spec': {
'worker_pool_specs': worker_pool_specs,
'scheduling': {
'timeout': timeout,
'restart_job_on_worker_restart': (
restart_job_on_worker_restart
),
},
'service_account': service_account,
'tensorboard': tensorboard,
'enable_web_access': enable_web_access,
'network': network,
'reserved_ip_ranges': reserved_ip_ranges,
'base_output_directory': {
'output_uri_prefix': base_output_directory
},
'persistent_resource_id': persistent_resource_id,
},
'labels': labels,
'encryption_spec': {'kms_key_name': encryption_spec_key_name},
},
gcp_resources=gcp_resources,
)
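# A minimal, hypothetical sketch of using this component inside a KFP
# pipeline. The display name, image, command, and machine type below are
# illustrative placeholders, not values defined in this module.
#
#   from kfp import dsl
#
#   @dsl.pipeline(name='custom-job-example')
#   def my_pipeline():
#       custom_training_job(
#           display_name='example-custom-job',
#           worker_pool_specs=[{
#               'machine_spec': {'machine_type': 'n1-standard-4'},
#               'replica_count': 1,
#               'container_spec': {
#                   'image_uri': 'gcr.io/my-project/my-training-image:latest',
#                   'command': ['python', 'task.py'],
#               },
#           }],
#       )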
| 857 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/custom_job/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# fmt: off
"""Run KFP components as [Vertex AI Custom Training Jobs](https://cloud.google.com/vertex-ai/docs/training/create-custom-job) with customized worker and cloud configurations."""
# fmt: on
from google_cloud_pipeline_components.v1.custom_job.component import custom_training_job as CustomTrainingJobOp
from google_cloud_pipeline_components.v1.custom_job.utils import create_custom_training_job_from_component
from google_cloud_pipeline_components.v1.custom_job.utils import create_custom_training_job_op_from_component
__all__ = [
'CustomTrainingJobOp',
'create_custom_training_job_op_from_component',
'create_custom_training_job_from_component',
]
| 858 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/custom_job/utils.py | # Copyright 2023 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for supporting Google Vertex AI Custom Training Job Op."""
import copy
import textwrap
from typing import Callable, Dict, List, Optional
import warnings
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.v1.custom_job import component
from kfp import components
import yaml
from google.protobuf import json_format
def _replace_executor_placeholder(
container_input: List[str],
) -> List[str]:
"""Replace executor placeholder in container command or args.
Args:
container_input: Container command or args.
Returns: container_input with executor placeholder replaced.
"""
# Executor replacement is used as executor content needs to be jsonified before
# injection into the payload, since payload is already a JSON serialized string.
EXECUTOR_INPUT_PLACEHOLDER = '{{$}}'
JSON_ESCAPED_EXECUTOR_INPUT_PLACEHOLDER = '{{$.json_escape[1]}}'
return [
JSON_ESCAPED_EXECUTOR_INPUT_PLACEHOLDER
if cmd_part == EXECUTOR_INPUT_PLACEHOLDER
else cmd_part
for cmd_part in container_input
]
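# Illustrative example (not executed here): given the container command
#   ['python', '-m', 'trainer', '--executor_input', '{{$}}']
# this helper returns
#   ['python', '-m', 'trainer', '--executor_input', '{{$.json_escape[1]}}'],
# so the executor input is JSON-escaped before being embedded in the
# serialized CustomJob payload.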
# keep identical to CustomTrainingJobOp
def create_custom_training_job_from_component(
component_spec: Callable,
display_name: str = '',
replica_count: int = 1,
machine_type: str = 'n1-standard-4',
accelerator_type: str = '',
accelerator_count: int = 1,
boot_disk_type: str = 'pd-ssd',
boot_disk_size_gb: int = 100,
timeout: str = '604800s',
restart_job_on_worker_restart: bool = False,
service_account: str = '',
network: str = '',
encryption_spec_key_name: str = '',
tensorboard: str = '',
enable_web_access: bool = False,
reserved_ip_ranges: Optional[List[str]] = None,
nfs_mounts: Optional[List[Dict[str, str]]] = None,
base_output_directory: str = '',
labels: Optional[Dict[str, str]] = None,
persistent_resource_id: str = _placeholders.PERSISTENT_RESOURCE_ID_PLACEHOLDER,
env: Optional[List[Dict[str, str]]] = None,
) -> Callable:
# fmt: off
"""Convert a KFP component into Vertex AI [custom training job](https://cloud.google.com/vertex-ai/docs/training/create-custom-job) using the [CustomJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.customJobs) API.
This utility converts a [KFP component](https://www.kubeflow.org/docs/components/pipelines/v2/components/) provided to `component_spec` into a `CustomTrainingJobOp` component. Your component's inputs, outputs, and logic are carried over, with additional [CustomJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec) parameters exposed. Note that this utility constructs a ClusterSpec where the master and all the workers use the same spec, meaning all disk/machine-spec-related parameters will apply to all replicas. This is suitable for use cases such as executing a training component over multiple replicas with [MultiWorkerMirroredStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/MultiWorkerMirroredStrategy) or [MirroredStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy). See [Create custom training jobs](https://cloud.google.com/vertex-ai/docs/training/create-custom-job) for more information.
Args:
component_spec: A KFP component.
display_name: The name of the CustomJob. If not provided the component's name will be used instead.
replica_count: The count of instances in the cluster. One replica always counts towards the master in worker_pool_spec[0] and the remaining replicas will be allocated in worker_pool_spec[1]. See [more information.](https://cloud.google.com/vertex-ai/docs/training/distributed-training#configure_a_distributed_training_job)
machine_type: The type of the machine to run the CustomJob. The default value is "n1-standard-4". See [more information](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types).
accelerator_type: The type of accelerator(s) that may be attached to the machine per `accelerator_count`. See [more information](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#acceleratortype).
accelerator_count: The number of accelerators to attach to the machine. Defaults to 1 if `accelerator_type` is set.
boot_disk_type: Type of the boot disk (default is "pd-ssd"). Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or "pd-standard" (Persistent Disk Hard Disk Drive). boot_disk_type is set as a static value and cannot be changed as a pipeline parameter.
boot_disk_size_gb: Size in GB of the boot disk (default is 100GB). `boot_disk_size_gb` is set as a static value and cannot be changed as a pipeline parameter.
timeout: The maximum job running time. The default is 7 days. A duration in seconds with up to nine fractional digits, terminated by 's', for example: "3.5s".
restart_job_on_worker_restart: Restarts the entire CustomJob if a worker gets restarted. This feature can be used by distributed training jobs that are not resilient to workers leaving and joining a job.
service_account: Sets the default service account for the workload run-as account. The [service account](https://cloud.google.com/vertex-ai/docs/pipelines/configure-project#service-account) running the pipeline submitting jobs must have act-as permission on this run-as account. If unspecified, the Vertex AI Custom Code [Service Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) for the CustomJob's project is used.
network: The full name of the Compute Engine network to which the job should be peered. For example, `projects/12345/global/networks/myVPC`. Format is of the form `projects/{project}/global/networks/{network}`. Where `{project}` is a project number, as in `12345`, and `{network}` is a network name. Private services access must already be configured for the network. If left unspecified, the job is not peered with any network.
encryption_spec_key_name: Customer-managed encryption key options for the CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the provided encryption key.
tensorboard: The name of a Vertex AI TensorBoard resource to which this CustomJob will upload TensorBoard logs.
enable_web_access: Whether you want Vertex AI to enable [interactive shell access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) to training containers. If `True`, you can access interactive shells at the URIs given by [CustomJob.web_access_uris][].
reserved_ip_ranges: A list of names for the reserved IP ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided IP ranges. Otherwise, the job will be deployed to any IP ranges under the provided VPC network.
nfs_mounts: A list of [NfsMount](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#NfsMount) resource specs in Json dict format. For more details about mounting NFS for CustomJob, see [Mount an NFS share for custom training](https://cloud.google.com/vertex-ai/docs/training/train-nfs-share).
base_output_directory: The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. See [more information](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/GcsDestination).
labels: The labels with user-defined metadata to organize the CustomJob. See [more information](https://goo.gl/xmQnxf).
persistent_resource_id: The ID of the PersistentResource in the same Project and Location in which to run the job. The default value is a placeholder that will be resolved to the PipelineJob [RuntimeConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.pipelineJobs#PipelineJob.RuntimeConfig)'s persistent resource id at runtime. However, if the PipelineJob doesn't set a Persistent Resource as the job-level runtime, the placeholder will be resolved to an empty string and the custom job will be run on demand. If the value is set explicitly, the custom job will run in the specified persistent resource; in this case, note that the network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise the job will be rejected.
env: Environment variables to be passed to the container. Takes the form `[{'name': '...', 'value': '...'}]`. Maximum limit is 100.
Returns:
A KFP component with CustomJob specification applied.
"""
# fmt: on
# This function constructs a Custom Job component based on the input
# component, by performing a 3-way merge of the inputs/outputs of the
# input component, the Custom Job component and the arguments given to this
# function.
#
# It first retrieves the PipelineSpec (as a Python dict) for each of the two
# components (the input component and the Custom Job component).
# Note: The advantage of using the PipelineSpec here is that the
# placeholders are (mostly) serialized, so there is less processing
# needed (and avoids unnecessary dependency on KFP internals).
#
# The arguments to this function are first inserted into each input parameter
# of the Custom Job component as a default value (which will be used at
# runtime, unless when overridden by specifying the input).
# One particular input parameter that needs detailed construction is the
# worker_pool_spec, before being inserted into the Custom Job component.
#
# After inserting the arguments into the Custom Job input parameters as
# default values, the input/output parameters from the input component are
# then merged with the Custom Job input/output parameters. Preference is given
# to Custom Job input parameters to make sure they are not overridden (which
# follows the same logic as the original version).
#
# It is assumed that the Custom Job component itself has no input/output
# artifacts, so the artifacts from the input component needs no merging.
# (There is a unit test to make sure this is the case, otherwise merging of
# artifacts need to be done here.)
#
# Once the above is done, and the dict of the Custom Job is converted back
# into a KFP component (by first converting to YAML, then using
# load_component_from_text to load the YAML).
# After adding the appropriate description and the name, the new component
# is returned.
cj_pipeline_spec = json_format.MessageToDict(
component.custom_training_job.pipeline_spec
)
user_pipeline_spec = json_format.MessageToDict(component_spec.pipeline_spec)
user_component_container = list(
user_pipeline_spec['deploymentSpec']['executors'].values()
)[0]['container']
worker_pool_spec = {
'machine_spec': {'machine_type': machine_type},
'replica_count': 1,
'container_spec': {
'image_uri': user_component_container['image'],
'command': _replace_executor_placeholder(
user_component_container.get('command', [])
),
'args': _replace_executor_placeholder(
user_component_container.get('args', [])
),
'env': env or [],
},
}
if accelerator_type:
worker_pool_spec['machine_spec']['accelerator_type'] = accelerator_type
worker_pool_spec['machine_spec']['accelerator_count'] = accelerator_count
if boot_disk_type:
worker_pool_spec['disk_spec'] = {
'boot_disk_type': boot_disk_type,
'boot_disk_size_gb': boot_disk_size_gb,
}
if nfs_mounts:
worker_pool_spec['nfs_mounts'] = nfs_mounts
worker_pool_specs = [worker_pool_spec]
if int(replica_count) > 1:
additional_worker_pool_spec = copy.deepcopy(worker_pool_spec)
additional_worker_pool_spec['replica_count'] = replica_count - 1
worker_pool_specs.append(additional_worker_pool_spec)
# get the component spec for both components
cj_component_spec_key = list(cj_pipeline_spec['components'].keys())[0]
cj_component_spec = cj_pipeline_spec['components'][cj_component_spec_key]
user_component_spec_key = list(user_pipeline_spec['components'].keys())[0]
user_component_spec = user_pipeline_spec['components'][
user_component_spec_key
]
# add custom job defaults based on user-provided args
custom_job_param_defaults = {
'display_name': display_name or component_spec.component_spec.name,
'worker_pool_specs': worker_pool_specs,
'timeout': timeout,
'restart_job_on_worker_restart': restart_job_on_worker_restart,
'service_account': service_account,
'tensorboard': tensorboard,
'enable_web_access': enable_web_access,
'network': network,
'reserved_ip_ranges': reserved_ip_ranges or [],
'base_output_directory': base_output_directory,
'labels': labels or {},
'encryption_spec_key_name': encryption_spec_key_name,
'persistent_resource_id': persistent_resource_id,
}
for param_name, default_value in custom_job_param_defaults.items():
cj_component_spec['inputDefinitions']['parameters'][param_name][
'defaultValue'
] = default_value
# merge parameters from user component into the customjob component
cj_component_spec['inputDefinitions']['parameters'].update(
user_component_spec.get('inputDefinitions', {}).get('parameters', {})
)
cj_component_spec['outputDefinitions']['parameters'].update(
user_component_spec.get('outputDefinitions', {}).get('parameters', {})
)
# use artifacts from user component
## assign artifacts, not update, since customjob has no artifact outputs
cj_component_spec['inputDefinitions']['artifacts'] = user_component_spec.get(
'inputDefinitions', {}
).get('artifacts', {})
cj_component_spec['outputDefinitions']['artifacts'] = user_component_spec.get(
'outputDefinitions', {}
).get('artifacts', {})
# copy the input definitions to the root, which will have an identical interface for a single-step pipeline
cj_pipeline_spec['root']['inputDefinitions'] = copy.deepcopy(
cj_component_spec['inputDefinitions']
)
cj_pipeline_spec['root']['outputDefinitions'] = copy.deepcopy(
cj_component_spec['outputDefinitions']
)
# update the customjob task with the user inputs
cj_task_key = list(cj_pipeline_spec['root']['dag']['tasks'].keys())[0]
user_task_key = list(user_pipeline_spec['root']['dag']['tasks'].keys())[0]
cj_pipeline_spec['root']['dag']['tasks'][cj_task_key]['inputs'].update(
user_pipeline_spec['root']['dag']['tasks'][user_task_key].get(
'inputs', {}
)
)
# reload the pipelinespec as a component using KFP
new_component = components.load_component_from_text(
yaml.safe_dump(cj_pipeline_spec)
)
# Copy the component name and description
# TODO(b/262360354): The inner .component_spec.name is needed here as that is
# the name that is retrieved by the FE for display. Can simply reference the
# outer .name once setter is implemented.
new_component.component_spec.name = component_spec.component_spec.name
if component_spec.description:
component_description = textwrap.dedent(f"""
A CustomJob that wraps {component_spec.component_spec.name}.
Original component description:
{component_spec.description}
Custom Job wrapper description:
{component.custom_training_job.description}
""")
new_component.description = component_description
return new_component
def create_custom_training_job_op_from_component(*args, **kwargs) -> Callable:
"""Deprecated.
Please use create_custom_training_job_from_component instead.
"""
warnings.warn(
f'{create_custom_training_job_op_from_component.__name__!r} is'
' deprecated. Please use'
f' {create_custom_training_job_from_component.__name__!r} instead.',
DeprecationWarning,
)
return create_custom_training_job_from_component(*args, **kwargs)
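# Example usage sketch (illustrative only): the `_example_train` component,
# machine type, and display name below are assumptions rather than values
# defined in this repository.
from kfp import dsl
@dsl.component
def _example_train(learning_rate: float) -> str:
  # Hypothetical training step; stands in for any user-defined KFP component.
  return f'trained with learning_rate={learning_rate}'
# Wrap the component so that it runs as a Vertex AI CustomJob task.
_example_custom_train = create_custom_training_job_from_component(
    _example_train,
    display_name='example-custom-train',
    machine_type='n1-standard-8',
    replica_count=1,
)
@dsl.pipeline(name='custom-training-job-sketch')
def _example_pipeline():
  # The wrapped component keeps the original inputs (learning_rate) and adds
  # the CustomJob parameters (timeout, service_account, ...) as defaults.
  _example_custom_train(learning_rate=0.01)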
| 859 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/batch_predict_job/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQTable
from google_cloud_pipeline_components.types.artifact_types import UnmanagedContainerModel
from google_cloud_pipeline_components.types.artifact_types import VertexBatchPredictionJob
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp.dsl import Artifact
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import IfPresentPlaceholder
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def model_batch_predict(
job_display_name: str,
gcp_resources: OutputPath(str),
batchpredictionjob: Output[VertexBatchPredictionJob],
bigquery_output_table: Output[BQTable],
gcs_output_directory: Output[Artifact],
model: Input[VertexModel] = None,
unmanaged_container_model: Input[UnmanagedContainerModel] = None,
location: str = 'us-central1',
instances_format: str = 'jsonl',
predictions_format: str = 'jsonl',
gcs_source_uris: List[str] = [],
bigquery_source_input_uri: str = '',
instance_type: str = '',
key_field: str = '',
included_fields: List[str] = [],
excluded_fields: List[str] = [],
model_parameters: Dict[str, str] = {},
gcs_destination_output_uri_prefix: str = '',
bigquery_destination_output_uri: str = '',
machine_type: str = '',
accelerator_type: str = '',
accelerator_count: int = 0,
starting_replica_count: int = 0,
max_replica_count: int = 0,
service_account: str = '',
manual_batch_tuning_parameters_batch_size: int = 0,
generate_explanation: bool = False,
explanation_metadata: Dict[str, str] = {},
explanation_parameters: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Creates a Google Cloud Vertex [BatchPredictionJob](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs) and waits for it to complete. For more details, see [BatchPredictionJob.Create](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs/create).
Args:
job_display_name: The user-defined name of this BatchPredictionJob.
location: Location for creating the BatchPredictionJob.
instances_format: The format in which instances are given, must be one of the [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models)'s supportedInputStorageFormats. For more details about this input config, see [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.)
predictions_format: The format in which Vertex AI gives the predictions. Must be one of the Model's supportedOutputStorageFormats. For more details about this output config, see [OutputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig).
model: The Model used to get predictions via this job. Must share the same ancestor Location. Starting this job has no impact on any existing deployments of the Model and their resources. Either this or `unmanaged_container_model` must be specified.
unmanaged_container_model: The unmanaged container model used to get predictions via this job. This should be used for models that are not uploaded to Vertex. Either this or model must be specified.
gcs_source_uris: Google Cloud Storage URI(-s) to your instances to run batch prediction on. They must match `instances_format`. May contain wildcards. For more information on wildcards, see [WildcardNames](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames). For more details about this input config, see [InputConfig](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig).
bigquery_source_input_uri: BigQuery URI to a table, up to 2000 characters long. For example: `projectId.bqDatasetId.bqTableId` For more details about this input config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig.
model_parameters: The parameters that govern the predictions. The schema of the parameters may be specified via the Model's `parameters_schema_uri`.
instance_type: The format of the instance that the Model accepts. Vertex AI will convert compatible [InstancesFormat](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig) to the specified format. Supported values are: `object`: Each input is converted to JSON object format. * For `bigquery`, each row is converted to an object. * For `jsonl`, each line of the JSONL input must be an object. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. `array`: Each input is converted to JSON array format. * For `bigquery`, each row is converted to an array. The order of columns is determined by the BigQuery column order, unless [included_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig) is populated. `included_fields` must be populated for specifying field orders. * For `jsonl`, if each line of the JSONL input is an object, `included_fields` must be populated for specifying field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction input as follows: * For `bigquery` and `csv`, the behavior is the same as `array`. The order of columns is the same as defined in the file or table, unless included_fields is populated. * For `jsonl`, the prediction instance format is determined by each line of the input. * For `tf-record`/`tf-record-gzip`, each record will be converted to an object in the format of `{"b64": <value>}`, where `<value>` is the Base64-encoded string of the content of the record. * For `file-list`, each file in the list will be converted to an object in the format of `{"b64": <value>}`, where `<value>` is the Base64-encoded string of the content of the file.
key_field: The name of the field that is considered as a key. The values identified by the key field are not included in the transformed instances that are sent to the Model. This is similar to specifying the name of the field in [excluded_fields](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#InputConfig). In addition, the batch prediction output will not include the instances. Instead the output will only include the value of the key field, in a field named `key` in the output: * For `jsonl` output format, the output will have a `key` field instead of the `instance` field. * For `csv`/`bigquery` output format, the output will have a `key` column instead of the instance feature columns. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.
included_fields: Fields that will be included in the prediction instance that is sent to the Model. If `instance_type` is `array`, the order of field names in `included_fields` also determines the order of the values in the array. When `included_fields` is populated, `excluded_fields` must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.
excluded_fields: Fields that will be excluded in the prediction instance that is sent to the Model. Excluded fields will be attached to the batch prediction output if key_field is not specified. When `excluded_fields` is populated, `included_fields` must be empty. The input must be JSONL with objects at each line, CSV, BigQuery or TfRecord.
gcs_destination_output_uri_prefix: The Google Cloud Storage location of the directory where the output is to be written to. In the given directory a new directory is created. Its name is `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., `predictions_N.<extension>` are created where `<extension>` depends on chosen `predictions_format`, and N may equal 0001 and depends on the total number of successfully predicted instances. If the Model has both `instance` and `prediction` schemata defined then each such file contains predictions as per the `predictions_format`. If prediction for any instance failed (partially or completely), then an additional `errors_0001.<extension>`, `errors_0002.<extension>`,..., `errors_N.<extension>` files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional `error` field which as value has `google.rpc.Status` containing only `code` and `message` fields. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
bigquery_destination_output_uri: The BigQuery project location where the output is to be written to. In the given project a new dataset is created with name `prediction_<model-display-name>_<job-create-time>`, where `<model-display-name>` is made BigQuery-dataset-name compatible (for example, most special characters become underscores), and timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two tables will be created, `predictions`, and `errors`. If the Model has both `instance` and `prediction` schemata defined then the tables have columns as follows: The `predictions` table contains instances for which the prediction succeeded, it has columns as per a concatenation of the Model's instance and prediction schemata. The `errors` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has [google.rpc.Status](Status) represented as a STRUCT, and containing only `code` and `message`. For more details about this output config, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#OutputConfig.
machine_type: The type of machine for running batch prediction on dedicated resources. If the Model supports DEDICATED_RESOURCES this config may be provided (and the job will use these resources). If the Model doesn't support AUTOMATIC_RESOURCES, this config must be provided. For more details about the BatchDedicatedResources, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.batchPredictionJobs#BatchDedicatedResources. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
accelerator_type: The type of accelerator(s) that may be attached to the machine as per `accelerator_count`. Only used if `machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
accelerator_count: The number of accelerators to attach to the `machine_type`. Only used if `machine_type` is set. For more details about the machine spec, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec
starting_replica_count: The number of machine replicas used at the start of the batch operation. If not set, Vertex AI decides starting number, not greater than `max_replica_count`. Only used if `machine_type` is set.
max_replica_count: The maximum number of machine replicas the batch operation may be scaled to. Only used if `machine_type` is set.
service_account: The service account that the DeployedModel's container runs as. If not specified, a system generated one will be used, which has minimal permissions and the custom container, if used, may not have enough permission to access other Google Cloud resources. Users deploying the Model must have the iam.serviceAccounts.actAs permission on this service account.
manual_batch_tuning_parameters_batch_size: The number of records (e.g. instances) of the operation given in each batch to a machine replica. Machine type and size of a single record should be considered when setting this parameter; a higher value speeds up the batch operation's execution, but a value that is too high will result in a whole batch not fitting in a machine's memory, and the whole operation will fail.
generate_explanation: Generate explanation along with the batch prediction results. This will cause the batch prediction output to include explanations based on the `prediction_format`: - `bigquery`: output includes a column named `explanation`. The value is a struct that conforms to the [aiplatform.gapic.Explanation] object. - `jsonl`: The JSON objects on each line include an additional entry keyed `explanation`. The value of the entry is a JSON object that conforms to the [aiplatform.gapic.Explanation] object. - `csv`: Generating explanations for CSV format is not supported. If this field is set to true, either the Model.explanation_spec or explanation_metadata and explanation_parameters must be populated.
explanation_metadata: Explanation metadata configuration for this BatchPredictionJob. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_metadata`. All fields of `explanation_metadata` are optional in the request. If a field of the `explanation_metadata` object is not populated, the corresponding field of the `Model.explanation_metadata` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata.
explanation_parameters: Parameters to configure explaining for Model's predictions. Can be specified only if `generate_explanation` is set to `True`. This value overrides the value of `Model.explanation_parameters`. All fields of `explanation_parameters` are optional in the request. If a field of the `explanation_parameters` object is not populated, the corresponding field of the `Model.explanation_parameters` object is inherited. For more details, see https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters.
labels: The labels with user-defined metadata to organize your BatchPredictionJobs. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
encryption_spec_key_name: Customer-managed encryption key options for a BatchPredictionJob. If this is set, then all resources created by the BatchPredictionJob will be encrypted with the provided encryption key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
project: Project to create the BatchPredictionJob. Defaults to the project in which the PipelineJob is run.
Returns:
batchpredictionjob: [**Deprecated. Use gcs_output_directory and bigquery_output_table instead.**] Artifact representation of the created batch prediction job.
gcs_output_directory: Artifact tracking the batch prediction job output. This is only available if gcs_destination_output_uri_prefix is specified.
bigquery_output_table: Artifact tracking the batch prediction job output. This is only available if bigquery_destination_output_uri is specified.
gcp_resources: Serialized gcp_resources proto tracking the batch prediction job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.batch_prediction_job.launcher',
],
args=[
'--type',
'BatchPredictionJob',
'--payload',
ConcatPlaceholder([
'{',
'"display_name": "',
job_display_name,
'", ',
IfPresentPlaceholder(
input_name='model',
then=ConcatPlaceholder([
'"model": "',
model.metadata['resourceName'],
'",',
]),
),
' "input_config": {',
'"instances_format": "',
instances_format,
'"',
', "gcs_source": {',
'"uris":',
gcs_source_uris,
'}',
', "bigquery_source": {',
'"input_uri": "',
bigquery_source_input_uri,
'"',
'}',
'}',
', "instance_config": {',
'"instance_type": "',
instance_type,
'"',
', "key_field": "',
key_field,
'" ',
IfPresentPlaceholder(
input_name='included_fields',
then=ConcatPlaceholder([
', "included_fields": ',
included_fields,
]),
),
IfPresentPlaceholder(
input_name='excluded_fields',
then=ConcatPlaceholder([
', "excluded_fields": ',
excluded_fields,
]),
),
'}',
', "model_parameters": ',
model_parameters,
', "output_config": {',
'"predictions_format": "',
predictions_format,
'"',
', "gcs_destination": {',
'"output_uri_prefix": "',
gcs_destination_output_uri_prefix,
'"',
'}',
', "bigquery_destination": {',
'"output_uri": "',
bigquery_destination_output_uri,
'"',
'}',
'}',
', "dedicated_resources": {',
'"machine_spec": {',
'"machine_type": "',
machine_type,
'"',
', "accelerator_type": "',
accelerator_type,
'"',
', "accelerator_count": ',
accelerator_count,
'}',
', "starting_replica_count": ',
starting_replica_count,
', "max_replica_count": ',
max_replica_count,
'}',
', "service_account": "',
service_account,
'"',
', "manual_batch_tuning_parameters": {',
'"batch_size": ',
manual_batch_tuning_parameters_batch_size,
'}',
', "generate_explanation": ',
generate_explanation,
', "explanation_spec": {',
'"parameters": ',
explanation_parameters,
', "metadata": ',
explanation_metadata,
'}',
', "labels": ',
labels,
', "encryption_spec": {"kms_key_name":"',
encryption_spec_key_name,
'"}',
'}',
]),
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
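# Example pipeline sketch (illustrative only): the bucket paths, serving image,
# and machine type below are placeholders, not values defined in this file.
from kfp import dsl
from google_cloud_pipeline_components.types import artifact_types
@dsl.pipeline(name='model-batch-predict-sketch')
def _model_batch_predict_sketch():
  # Import an unmanaged model (artifacts + serving container) so that no prior
  # Model upload step is required.
  unmanaged_model = dsl.importer(
      artifact_uri='gs://example-bucket/model',
      artifact_class=artifact_types.UnmanagedContainerModel,
      metadata={
          'containerSpec': {
              'imageUri': (
                  'us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest'
              )
          }
      },
  )
  model_batch_predict(
      job_display_name='example-batch-predict',
      unmanaged_container_model=unmanaged_model.output,
      instances_format='jsonl',
      gcs_source_uris=['gs://example-bucket/instances.jsonl'],
      predictions_format='jsonl',
      gcs_destination_output_uri_prefix='gs://example-bucket/predictions',
      machine_type='n1-standard-2',
      starting_replica_count=1,
      max_replica_count=1,
  )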
| 860 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/batch_predict_job/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# fmt: off
"""Serve batch predictions from your models using [Vertex AI Batch Predictions](https://cloud.google.com/vertex-ai/docs/predictions/overview?_ga=2.161419069.-1686833729.1684288907#batch_predictions)."""
# fmt: on
from google_cloud_pipeline_components.v1.batch_predict_job.component import model_batch_predict as ModelBatchPredictOp
__all__ = [
'ModelBatchPredictOp',
]
| 861 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/__init__.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# fmt: off
"""Manage models via [Vertex AI Model Registry](https://cloud.google.com/vertex-ai/docs/model-registry/introduction)."""
# fmt: on
from google_cloud_pipeline_components.v1.model.delete_model.component import model_delete as ModelDeleteOp
from google_cloud_pipeline_components.v1.model.export_model.component import model_export as ModelExportOp
from google_cloud_pipeline_components.v1.model.get_model.component import model_get as ModelGetOp
from google_cloud_pipeline_components.v1.model.upload_model.component import model_upload as ModelUploadOp
__all__ = [
'ModelExportOp',
'ModelUploadOp',
'ModelDeleteOp',
'ModelGetOp',
]
| 862 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/upload_model/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import UnmanagedContainerModel
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import IfPresentPlaceholder
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def model_upload(
display_name: str,
gcp_resources: OutputPath(str),
model: Output[VertexModel],
location: str = 'us-central1',
description: str = '',
parent_model: Input[VertexModel] = None,
unmanaged_container_model: Input[UnmanagedContainerModel] = None,
explanation_metadata: Dict[str, str] = {},
explanation_parameters: Dict[str, str] = {},
version_aliases: List[str] = [],
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""[Uploads](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/upload) a Google Cloud Vertex [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models) and returns a Model artifact representing the uploaded Model resource, with a tag for the particular version. See [Model upload](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/upload) method for more information.
Args:
location: Optional location to upload this Model to. If not set, defaults to `us-central1`.
display_name: The display name of the Model. The name can be up to 128 characters long and can consist of any UTF-8 characters. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models#Model)
description: The description of the Model. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models#Model)
parent_model: An artifact of the model to which a new version is uploaded. Only specify this field when uploading a new version. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/upload#request-body)
unmanaged_container_model: The unmanaged container model to be uploaded. The Model can be passed from an upstream step or imported via a KFP `dsl.importer`. Example:
from kfp import dsl
from google_cloud_pipeline_components.types import artifact_types
importer_spec = dsl.importer( artifact_uri='gs://managed-pipeline-gcpc-e2e-test/automl-tabular/model', artifact_class=artifact_types.UnmanagedContainerModel, metadata={ 'containerSpec': { 'imageUri': 'us-docker.pkg.dev/vertex-ai/automl-tabular/prediction-server:prod' } })
explanation_metadata: Metadata describing the Model's input and output for explanation. Both `explanation_metadata` and `explanation_parameters` must be passed together when used. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#explanationmetadata)
explanation_parameters: Parameters to configure explaining for Model's predictions. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/ExplanationSpec#ExplanationParameters)
version_aliases: User provided version aliases so that a model version can be referenced via alias (i.e. `projects/{project}/locations/{location}/models/{modelId}@{version_alias}` instead of auto-generated version id (i.e. `projects/{project}/locations/{location}/models/{modelId}@{versionId}`). The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] to distinguish from versionId. A default version alias will be created for the first version of the model, and there must be exactly one default version alias for a model.
encryption_spec_key_name: Customer-managed encryption key spec for a Model. If set, this Model and all sub-resources of this Model will be secured by this key. Has the form: `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created.
labels: The labels with user-defined metadata to organize your model. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
project: Project to upload this Model to. Defaults to the project in which the PipelineJob is run.
Returns:
model: Artifact tracking the created Model version.
gcp_resources: Serialized JSON of `gcp_resources` [proto](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud/google_cloud_pipeline_components/proto) which tracks the upload Model's long-running operation.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.model.upload_model.launcher',
],
args=[
'--type',
'UploadModel',
'--payload',
ConcatPlaceholder([
'{',
'"display_name": "',
display_name,
'"',
', "description": "',
description,
'"',
', "explanation_spec": {',
'"parameters": ',
explanation_parameters,
', "metadata": ',
explanation_metadata,
'}',
', "encryption_spec": {"kms_key_name":"',
encryption_spec_key_name,
'"}',
', "version_aliases": ',
version_aliases,
', "labels": ',
labels,
', "pipeline_job": "',
f'projects/{project}/locations/{location}/pipelineJobs/{dsl.PIPELINE_JOB_ID_PLACEHOLDER}',
'"',
'}',
]),
'--project',
project,
'--location',
location,
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
IfPresentPlaceholder(
input_name='parent_model',
then=[
'--parent_model_name',
parent_model.metadata['resourceName'],
],
),
],
)
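# Example pipeline sketch (illustrative only) expanding the importer snippet in
# the docstring above; the GCS path and serving image are placeholders.
from kfp import dsl
from google_cloud_pipeline_components.types import artifact_types
@dsl.pipeline(name='model-upload-sketch')
def _model_upload_sketch():
  unmanaged_model = dsl.importer(
      artifact_uri='gs://example-bucket/model',
      artifact_class=artifact_types.UnmanagedContainerModel,
      metadata={
          'containerSpec': {
              'imageUri': (
                  'us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest'
              )
          }
      },
  )
  model_upload(
      display_name='example-model',
      unmanaged_container_model=unmanaged_model.output,
  )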
| 863 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/upload_model/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 Model Upload Component."""
| 864 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/get_model/component.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
@dsl.container_component
def model_get(
model: dsl.Output[VertexModel],
model_name: str,
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
location: str = 'us-central1',
):
# fmt: off
"""Gets a model artifact based on the model name of an existing Vertex model.
Args:
project: Project from which to get the VertexModel. Defaults to the project in which the PipelineJob is run.
model_name: Specify the model name in one of the following formats: {model}: Fetches the default model version. {model}@{model_version_id}: Fetches the model version specified by its ID. {model}@{model_version_alias}: Fetches the model version specified by its alias.
location: Location from which to get the VertexModel. Defaults to `us-central1`.
Returns:
model: Artifact of the Vertex Model.
"""
# fmt: on
return dsl.ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.model.get_model.launcher',
],
args=[
'--project',
project,
'--location',
location,
'--model_name',
model_name,
'--executor_input',
'{{$}}',
],
)
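# Example pipeline sketch (illustrative only): the model ID below is a
# placeholder; any of the name formats described in the docstring work.
from kfp import dsl
@dsl.pipeline(name='model-get-sketch')
def _model_get_sketch():
  # Downstream steps can consume the returned VertexModel artifact via
  # `.outputs['model']`.
  model_get(
      model_name='1234567890',
      location='us-central1',
  )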
| 865 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/get_model/__init__.py | # Copyright 2024 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline Get Vertex Model Component."""
| 866 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/delete_model/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp import dsl
from kfp.dsl import Input
@dsl.container_component
def model_delete(model: Input[VertexModel], gcp_resources: dsl.OutputPath(str)):
# fmt: off
"""[Deletes](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/delete) a Google Cloud Vertex [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models). See the [Model delete](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/delete) method for more information. Note that the full model is deleted, NOT only the model version.
Args:
model: The name of the Model resource to be deleted. Format: `projects/{project}/locations/{location}/models/{model}`. [More information](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/delete#path-parameters).
Returns:
gcp_resources: Serialized JSON of `gcp_resources` [proto](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud/google_cloud_pipeline_components/proto) which tracks the delete Model's long-running operation.
"""
# fmt: on
return dsl.ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.model.delete_model.launcher',
],
args=[
'--type',
'DeleteModel',
'--payload',
dsl.ConcatPlaceholder([
'{',
'"model": "',
model.metadata['resourceName'],
'"',
'}',
]),
'--project',
'',
'--location',
'',
'--gcp_resources',
gcp_resources,
],
)
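# Example pipeline sketch (illustrative only): supplying the VertexModel
# artifact through dsl.importer is one option; the resource name and URI below
# are placeholders.
from kfp import dsl
from google_cloud_pipeline_components.types import artifact_types
@dsl.pipeline(name='model-delete-sketch')
def _model_delete_sketch():
  existing_model = dsl.importer(
      artifact_uri=(
          'https://us-central1-aiplatform.googleapis.com/v1/projects/'
          'example-project/locations/us-central1/models/1234567890'
      ),
      artifact_class=artifact_types.VertexModel,
      metadata={
          'resourceName': (
              'projects/example-project/locations/us-central1/models/1234567890'
          )
      },
  )
  model_delete(model=existing_model.output)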
| 867 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/delete_model/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Undeploy Model Component."""
| 868 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/export_model/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components.types.artifact_types import VertexModel
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import OutputPath
@container_component
def model_export(
model: Input[VertexModel],
export_format_id: str,
output_info: OutputPath(Dict[str, str]),
gcp_resources: OutputPath(str),
artifact_destination: str = '',
image_destination: str = '',
):
# fmt: off
"""[Exports](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/export) a Google Cloud Vertex [Model](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models) to a user-specified location. The Model must be exportable. A Model is considered to be exportable if it has at least one supported export format. See the [Model export](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/export) method for more information.
Args:
model: The Model to export.
export_format_id: The ID of the format in which the Model must be exported. Each Model lists the export formats it supports. If no value is provided here, then the first from the list of the Model's supported formats is used by default. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/export#OutputConfig)
artifact_destination: The Cloud Storage location where the Model artifact is to be written to. Under the directory given as the destination a new one with name `"model-export-<model-display-name>-<timestamp-of-export-call>"`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. Inside, the Model and any of its supporting files will be written. This field should only be set when, in [Model.supported_export_formats], the value for the key given in `export_format_id` contains `ARTIFACT`. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/export#OutputConfig)
image_destination: The Google Container Registry or Artifact Registry URI where the Model container image will be copied to. [More information.](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models/export#OutputConfig) Accepted forms: - Google Container Registry path. For example: `gcr.io/projectId/imageName:tag`. - Artifact Registry path. For example: `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`. This field should only be set when, in [Model.supported_export_formats], the value for the key given in `export_format_id` contains `IMAGE`.
Returns:
output_info: Details of the completed export with output destination paths to the artifacts or container image.
gcp_resources: Serialized JSON of `gcp_resources` [proto](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud/google_cloud_pipeline_components/proto) which tracks the export Model's long-running operation.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.model.export_model.launcher',
],
args=[
'--type',
'ExportModel',
'--payload',
ConcatPlaceholder([
'{',
'"name": "',
model.metadata['resourceName'],
'"',
', "output_config": {',
'"export_format_id": "',
export_format_id,
'"',
', "artifact_destination": {',
'"output_uri_prefix": "',
artifact_destination,
'"',
'}',
', "image_destination": {',
'"output_uri": "',
image_destination,
'"',
'}',
'}',
'}',
]),
'--project',
'', # not being used
'--location',
'', # not being used
'--gcp_resources',
gcp_resources,
'--output_info',
output_info,
],
)
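# Example pipeline sketch (illustrative only): the export format ID, model
# resource name, and destination bucket are placeholders; the Model must
# actually support the chosen export format.
from kfp import dsl
from google_cloud_pipeline_components.types import artifact_types
@dsl.pipeline(name='model-export-sketch')
def _model_export_sketch():
  existing_model = dsl.importer(
      artifact_uri=(
          'https://us-central1-aiplatform.googleapis.com/v1/projects/'
          'example-project/locations/us-central1/models/1234567890'
      ),
      artifact_class=artifact_types.VertexModel,
      metadata={
          'resourceName': (
              'projects/example-project/locations/us-central1/models/1234567890'
          )
      },
  )
  model_export(
      model=existing_model.output,
      export_format_id='custom-trained',
      artifact_destination='gs://example-bucket/exports',
  )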
| 869 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/model/export_model/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 Model Export Component."""
| 870 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# fmt: off
"""Create and execute machine learning models via SQL using [Google Cloud BigQuery ML](https://cloud.google.com/bigquery/docs/bqml-introduction)."""
# fmt: on
from google_cloud_pipeline_components.v1.bigquery.create_model.component import bigquery_create_model_job as BigqueryCreateModelJobOp
from google_cloud_pipeline_components.v1.bigquery.detect_anomalies_model.component import bigquery_detect_anomalies_job as BigqueryDetectAnomaliesModelJobOp
from google_cloud_pipeline_components.v1.bigquery.drop_model.component import bigquery_drop_model_job as BigqueryDropModelJobOp
from google_cloud_pipeline_components.v1.bigquery.evaluate_model.component import bigquery_evaluate_model_job as BigqueryEvaluateModelJobOp
from google_cloud_pipeline_components.v1.bigquery.explain_forecast_model.component import bigquery_explain_forecast_model_job as BigqueryExplainForecastModelJobOp
from google_cloud_pipeline_components.v1.bigquery.explain_predict_model.component import bigquery_explain_predict_model_job as BigqueryExplainPredictModelJobOp
from google_cloud_pipeline_components.v1.bigquery.export_model.component import bigquery_export_model_job as BigqueryExportModelJobOp
from google_cloud_pipeline_components.v1.bigquery.feature_importance.component import bigquery_ml_feature_importance_job as BigqueryMLFeatureImportanceJobOp
from google_cloud_pipeline_components.v1.bigquery.forecast_model.component import bigquery_forecast_model_job as BigqueryForecastModelJobOp
from google_cloud_pipeline_components.v1.bigquery.global_explain.component import bigquery_ml_global_explain_job as BigqueryMLGlobalExplainJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_advanced_weights.component import bigquery_ml_advanced_weights_job as BigqueryMLAdvancedWeightsJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_arima_coefficients.component import bigquery_ml_arima_coefficients as BigqueryMLArimaCoefficientsJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_arima_evaluate.component import bigquery_ml_arima_evaluate_job as BigqueryMLArimaEvaluateJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_centroids.component import bigquery_ml_centroids_job as BigqueryMLCentroidsJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_confusion_matrix.component import bigquery_ml_confusion_matrix_job as BigqueryMLConfusionMatrixJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_feature_info.component import bigquery_ml_feature_info_job as BigqueryMLFeatureInfoJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_principal_component_info.component import bigquery_ml_principal_component_info_job as BigqueryMLPrincipalComponentInfoJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_principal_components.component import bigquery_ml_principal_components_job as BigqueryMLPrincipalComponentsJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_recommend.component import bigquery_ml_recommend_job as BigqueryMLRecommendJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_reconstruction_loss.component import bigquery_ml_reconstruction_loss_job as BigqueryMLReconstructionLossJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_roc_curve.component import bigquery_ml_roc_curve_job as BigqueryMLRocCurveJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_training_info.component import bigquery_ml_training_info_job as BigqueryMLTrainingInfoJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_trial_info.component import bigquery_ml_trial_info_job as BigqueryMLTrialInfoJobOp
from google_cloud_pipeline_components.v1.bigquery.ml_weights.component import bigquery_ml_weights_job as BigqueryMLWeightsJobOp
from google_cloud_pipeline_components.v1.bigquery.predict_model.component import bigquery_predict_model_job as BigqueryPredictModelJobOp
from google_cloud_pipeline_components.v1.bigquery.query_job.component import bigquery_query_job as BigqueryQueryJobOp
__all__ = [
'BigqueryCreateModelJobOp',
'BigqueryDetectAnomaliesModelJobOp',
'BigqueryDropModelJobOp',
'BigqueryEvaluateModelJobOp',
'BigqueryExplainForecastModelJobOp',
'BigqueryExplainPredictModelJobOp',
'BigqueryExportModelJobOp',
'BigqueryForecastModelJobOp',
'BigqueryMLAdvancedWeightsJobOp',
'BigqueryMLArimaCoefficientsJobOp',
'BigqueryMLArimaEvaluateJobOp',
'BigqueryMLCentroidsJobOp',
'BigqueryMLConfusionMatrixJobOp',
'BigqueryMLFeatureImportanceJobOp',
'BigqueryMLFeatureInfoJobOp',
'BigqueryMLGlobalExplainJobOp',
'BigqueryMLPrincipalComponentInfoJobOp',
'BigqueryMLPrincipalComponentsJobOp',
'BigqueryMLRecommendJobOp',
'BigqueryMLReconstructionLossJobOp',
'BigqueryMLRocCurveJobOp',
'BigqueryMLTrainingInfoJobOp',
'BigqueryMLTrialInfoJobOp',
'BigqueryMLWeightsJobOp',
'BigqueryPredictModelJobOp',
'BigqueryQueryJobOp',
]
| 871 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_roc_curve/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from google_cloud_pipeline_components.types.artifact_types import BQTable
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_roc_curve_job(
model: Input[BQMLModel],
roc_curve: Output[BQTable],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
table_name: str = '',
query_statement: str = '',
thresholds: str = '',
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery roc curve job and waits for it to finish.
Args:
location: Location of the job to run the BigQuery roc curve job. If not set, defaults to the `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for the BigQuery roc curve job. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-roc#roc_model_name
table_name: BigQuery table id of the input table that contains the evaluation data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-roc#roc_table_name
query_statement: Query statement string used to generate the evaluation data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-roc#roc_query_statement
thresholds: Percentile values of the prediction output. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-roc#roc_thresholds
query_parameters: Query parameters for standard SQL queries. If query_parameters are both specified in here and in job_configuration_query, the value in here will override the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
project: Project to run BigQuery roc curve job. Defaults to the project in which the PipelineJob is run.
Returns:
roc_curve: Describes common metrics applicable to the type of model supplied. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-roc#mlroc_curve_output
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.ml_roc_curve.launcher',
],
args=[
'--type',
'BigqueryMLRocCurveJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--table_name',
table_name,
'--query_statement',
query_statement,
'--thresholds',
thresholds,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder(
['{', '"query_parameters": ', query_parameters, '}']
),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
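# Example pipeline sketch (illustrative only): the BigQuery ML model reference
# and evaluation table are placeholders. The importer metadata must carry the
# projectId/datasetId/modelId keys used to build the --model_name argument
# above.
from kfp import dsl
from google_cloud_pipeline_components.types import artifact_types
@dsl.pipeline(name='bq-ml-roc-curve-sketch')
def _bq_ml_roc_curve_sketch():
  bqml_model = dsl.importer(
      artifact_uri=(
          'https://www.googleapis.com/bigquery/v2/projects/example-project/'
          'datasets/example_dataset/models/example_model'
      ),
      artifact_class=artifact_types.BQMLModel,
      metadata={
          'projectId': 'example-project',
          'datasetId': 'example_dataset',
          'modelId': 'example_model',
      },
  )
  bigquery_ml_roc_curve_job(
      model=bqml_model.output,
      table_name='example-project.example_dataset.eval_data',
      location='US',
  )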
| 872 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_roc_curve/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery ML Roc Curve Component."""
| 873 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_principal_components/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from google_cloud_pipeline_components.types.artifact_types import BQTable
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_principal_components_job(
model: Input[BQMLModel],
destination_table: Output[BQTable],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery ML.principal_components job and waits for it to finish.
Args:
location: Location to run the BigQuery ML.principal_components job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for ML.principal_components. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-principal-components#mlprincipal_components_syntax
query_parameters: jobs.query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other one.
project: Project to run BigQuery ML.principal_components job. Defaults to the project in which the PipelineJob is run.
Returns:
destination_table: Describes the table that stores the principal components of the supplied model. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-principal-components#mlprincipal_components_output
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.ml_principal_components.launcher',
],
args=[
'--type',
'BigqueryMLPrincipalComponentsJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
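# --- Illustrative usage sketch (added for exposition; not part of the original
# file). A minimal KFP pipeline that feeds an existing BigQuery ML PCA model
# into the component defined above. The project/dataset/model ids and the
# artifact URI are placeholders, and dsl.importer is assumed here only as a way
# to surface the model as a BQMLModel artifact; the component reads
# projectId/datasetId/modelId from the artifact metadata.
from kfp import dsl


@dsl.pipeline(name='bq-ml-principal-components-example')
def bq_ml_principal_components_example_pipeline():
  # Import an existing BQML model as a BQMLModel artifact (placeholder ids).
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_pca_model',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_pca_model',
      },
  )
  bigquery_ml_principal_components_job(
      project='my-project',
      location='US',
      model=model_op.output,
  )
# Compile with, e.g.:
# kfp.compiler.Compiler().compile(bq_ml_principal_components_example_pipeline, 'pipeline.yaml')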
| 874 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_principal_components/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery ML Principal Components Component."""
| 875 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/predict_model/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from google_cloud_pipeline_components.types.artifact_types import BQTable
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_predict_model_job(
model: Input[BQMLModel],
destination_table: Output[BQTable],
gcp_resources: OutputPath(str),
table_name: str = '',
query_statement: str = '',
threshold: float = -1.0,
location: str = 'us-central1',
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery predict model job and waits for it to finish.
Args:
location: Location to run the BigQuery model prediction job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for prediction. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-predict#predict_model_name
table_name: BigQuery table id of the input table that contains the prediction data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-predict#predict_table_name
query_statement: Query statement string used to generate the prediction data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-predict#predict_query_statement
threshold: A custom threshold for the binary logistic regression model used as the cutoff between two labels. Predictions above the threshold are treated as positive predictions. Predictions below the threshold are treated as negative predictions. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-predict#threshold
query_parameters: jobs.query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other one.
project: Project to run BigQuery model prediction job. Defaults to the project in which the PipelineJob is run.
Returns:
destination_table: Describes the table where the model prediction results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery.
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.predict_model.launcher',
],
args=[
'--type',
'BigqueryPredictModelJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--table_name',
table_name,
'--query_statement',
query_statement,
'--threshold',
threshold,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
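# --- Illustrative usage sketch (added for exposition; not part of the original
# file). Runs batch prediction against a BigQuery table with a custom decision
# threshold. All ids and table names are placeholders; the model is imported via
# dsl.importer as a BQMLModel artifact whose metadata carries
# projectId/datasetId/modelId, which is what the component reads.
from kfp import dsl


@dsl.pipeline(name='bq-predict-model-example')
def bq_predict_model_example_pipeline(project: str = 'my-project'):
  # Import an existing BQML classifier as a BQMLModel artifact (placeholder ids).
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_classifier',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_classifier',
      },
  )
  bigquery_predict_model_job(
      project=project,
      location='US',
      model=model_op.output,
      table_name='my-project.my_dataset.prediction_input',
      threshold=0.6,
  )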
| 876 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/predict_model/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery Predict Model Component."""
| 877 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/explain_predict_model/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from google_cloud_pipeline_components.types.artifact_types import BQTable
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_explain_predict_model_job(
model: Input[BQMLModel],
destination_table: Output[BQTable],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
table_name: str = '',
query_statement: str = '',
top_k_features: int = -1,
threshold: float = -1.0,
num_integral_steps: int = -1,
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery explain predict model job and waits for it to finish.
Args:
location: Location to run the BigQuery model prediction job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for explaining prediction. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-predict#model_name
table_name: BigQuery table id of the input table that contains the prediction data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-predict#table_name
query_statement: Query statement string used to generate the prediction data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-predict#query_statement
top_k_features: This argument specifies how many top feature attribution pairs are generated per row of input data. The features are ranked by the absolute values of their attributions. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-predict#top_k_features
threshold: A custom threshold for the binary logistic regression model used as the cutoff between two labels. Predictions above the threshold are treated as positive predictions. Predictions below the threshold are treated as negative predictions. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-predict#threshold
num_integral_steps: This argument specifies the number of steps to sample between the example being explained and its baseline for approximating the integral in integrated gradients attribution methods. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-predict#num_integral_steps
query_parameters: Query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other one.
project: Project to run BigQuery model prediction job. Defaults to the project in which the PipelineJob is run.
Returns:
destination_table: Describes the table where the model prediction results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery.
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.explain_predict_model.launcher',
],
args=[
'--type',
'BigqueryExplainPredictModelJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--table_name',
table_name,
'--query_statement',
query_statement,
'--top_k_features',
top_k_features,
'--threshold',
threshold,
'--num_integral_steps',
num_integral_steps,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
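# --- Illustrative usage sketch (added for exposition; not part of the original
# file). Generates per-row feature attributions with ML.EXPLAIN_PREDICT, using a
# query statement instead of a table and limiting output to the top 3 features.
# All ids, the URI, and the query are placeholders; dsl.importer is assumed as
# the way to wire in an existing BQMLModel artifact.
from kfp import dsl


@dsl.pipeline(name='bq-explain-predict-example')
def bq_explain_predict_example_pipeline():
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_classifier',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_classifier',
      },
  )
  bigquery_explain_predict_model_job(
      project='my-project',
      location='US',
      model=model_op.output,
      query_statement='SELECT * FROM `my-project.my_dataset.scoring_input`',
      top_k_features=3,
      num_integral_steps=50,
  )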
| 878 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/explain_predict_model/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery Explain Predict Model Component."""
| 879 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/detect_anomalies_model/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from google_cloud_pipeline_components.types.artifact_types import BQTable
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_detect_anomalies_job(
model: Input[BQMLModel],
destination_table: Output[BQTable],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
table_name: str = '',
query_statement: str = '',
contamination: float = -1.0,
anomaly_prob_threshold: float = 0.95,
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery detect anomalies model job and waits for it to finish.
Args:
location: Location to run the BigQuery model prediction job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for prediction. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-detect-anomalies#model_name
table_name: BigQuery table id of the input table that contains the data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-detect-anomalies#table_name
query_statement: Query statement string used to generate the data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-detect-anomalies#query_statement
contamination: Contamination is the proportion of anomalies in the training dataset that are used to create the AUTOENCODER, KMEANS, or PCA input models. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-detect-anomalies#contamination
anomaly_prob_threshold: The ARIMA_PLUS model supports the anomaly_prob_threshold custom threshold for anomaly detection. The value of the anomaly probability at each timestamp is calculated using the actual time-series data value and the values of the predicted time-series data and the variance from the model training. The actual time-series data value at a specific timestamp is identified as anomalous if the anomaly probability exceeds the anomaly_prob_threshold value. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-detect-anomalies#anomaly_prob_threshold
query_parameters: Query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other one.
project: Project to run BigQuery model prediction job. Defaults to the project in which the PipelineJob is run.
Returns:
destination_table: Describes the table where the model prediction results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery.
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.detect_anomalies_model.launcher',
],
args=[
'--type',
'BigqueryDetectAnomaliesModelJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--table_name',
table_name,
'--query_statement',
query_statement,
'--contamination',
contamination,
'--anomaly_prob_threshold',
anomaly_prob_threshold,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
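# --- Illustrative usage sketch (added for exposition; not part of the original
# file). Flags anomalous points in a time series with an ARIMA_PLUS model using
# the anomaly_prob_threshold cutoff; for KMEANS, PCA, or AUTOENCODER models the
# contamination argument would be set instead. All ids and the URI are
# placeholders, and dsl.importer is assumed as the way to wire in the model.
from kfp import dsl


@dsl.pipeline(name='bq-detect-anomalies-example')
def bq_detect_anomalies_example_pipeline():
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_arima_plus_model',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_arima_plus_model',
      },
  )
  bigquery_detect_anomalies_job(
      project='my-project',
      location='US',
      model=model_op.output,
      table_name='my-project.my_dataset.new_time_series_points',
      anomaly_prob_threshold=0.99,
  )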
| 880 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/detect_anomalies_model/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery Detect Anomalies Model Component."""
| 881 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_trial_info/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from kfp.dsl import Artifact
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_trial_info_job(
model: Input[BQMLModel],
trial_info: Output[Artifact],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery ml trial info job and waits for it to finish.
Args:
location: Location to run the BigQuery ml trial info job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-trial-info#predict_model_name
query_parameters: Query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other one.
project: Project to run BigQuery ml trial info job. Defaults to the project in which the PipelineJob is run.
Returns:
trial_info: Describes the trial info applicable to the type of model supplied. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-trial-info
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.ml_trial_info.launcher',
],
args=[
'--type',
'BigqueryMLTrialInfoJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
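# --- Illustrative usage sketch (added for exposition; not part of the original
# file). Retrieves per-trial information for a model trained with hyperparameter
# tuning (NUM_TRIALS > 1). All ids and the URI are placeholders; the resulting
# trial_info artifact can then be consumed by downstream steps.
from kfp import dsl


@dsl.pipeline(name='bq-ml-trial-info-example')
def bq_ml_trial_info_example_pipeline():
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_tuned_model',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_tuned_model',
      },
  )
  bigquery_ml_trial_info_job(
      project='my-project',
      location='US',
      model=model_op.output,
  )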
| 882 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_trial_info/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery ML Trial Info Component."""
| 883 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_feature_info/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from kfp.dsl import Artifact
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_feature_info_job(
model: Input[BQMLModel],
feature_info: Output[Artifact],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery feature info job and waits for it to finish.
Args:
location: Location of the job to run BigQuery feature info job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model whose feature information is returned by ML.FEATURE_INFO. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-predict#predict_model_name
query_parameters: jobs.query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
project: Project to run BigQuery feature info job. Defaults to the project in which the PipelineJob is run.
Returns:
feature_info: Describes the per-feature statistics of the training data for the supplied model. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-feature#mlfeature_info_output
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.ml_feature_info.launcher',
],
args=[
'--type',
'BigqueryMLFeatureInfoJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder(
['{', '"query_parameters": ', query_parameters, '}']
),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
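# --- Illustrative usage sketch (added for exposition; not part of the original
# file). Returns per-feature statistics of the model's training data. Note that
# this component, unlike most others in this package, does not expose an
# encryption_spec_key_name parameter. All ids and the URI are placeholders;
# dsl.importer is assumed as the way to wire in the model artifact.
from kfp import dsl


@dsl.pipeline(name='bq-ml-feature-info-example')
def bq_ml_feature_info_example_pipeline():
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_model',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_model',
      },
  )
  bigquery_ml_feature_info_job(
      project='my-project',
      location='US',
      model=model_op.output,
  )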
| 884 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_feature_info/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery ML Feature Info Component."""
| 885 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_recommend/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from google_cloud_pipeline_components.types.artifact_types import BQTable
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_recommend_job(
model: Input[BQMLModel],
destination_table: Output[BQTable],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
table_name: str = '',
query_statement: str = '',
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery ML.Recommend job and waits for it to finish.
Args:
location: Location to run the BigQuery ML.Recommend job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for ML.Recommend. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-recommend#recommend_model_name
table_name: BigQuery table id of the input table that contains the user and/or item data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-recommend#recommend_table_name
query_statement: Query statement string used to generate the user and/or item data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-recommend#recommend_query_statement
query_parameters: jobs.query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other one.
project: Project to run BigQuery ML.Recommend job. Defaults to the project in which the PipelineJob is run.
Returns:
destination_table: Describes the table where the recommendation results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery.
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.ml_recommend.launcher',
],
args=[
'--type',
'BigqueryMLRecommendJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--table_name',
table_name,
'--query_statement',
query_statement,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
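# --- Illustrative usage sketch (added for exposition; not part of the original
# file). Generates recommendations from a matrix factorization model for the
# users returned by a query statement. All ids, the URI, and the query are
# placeholders; dsl.importer is assumed as the way to wire in the model.
from kfp import dsl


@dsl.pipeline(name='bq-ml-recommend-example')
def bq_ml_recommend_example_pipeline():
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_mf_model',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_mf_model',
      },
  )
  bigquery_ml_recommend_job(
      project='my-project',
      location='US',
      model=model_op.output,
      query_statement='SELECT DISTINCT user_id FROM `my-project.my_dataset.ratings`',
  )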
| 886 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_recommend/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery ML Recommend Component."""
| 887 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/evaluate_model/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from kfp.dsl import Artifact
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_evaluate_model_job(
model: Input[BQMLModel],
evaluation_metrics: Output[Artifact],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
table_name: str = '',
query_statement: str = '',
threshold: float = -1.0,
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery evaluate model job and waits for it to finish.
Args:
location: Location to run the BigQuery model evaluation job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for evaluation. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-evaluate#eval_model_name
table_name: BigQuery table id of the input table that contains the evaluation data, as in ML.EVALUATE(MODEL model_name[, {TABLE table_name | (query_statement)}]). For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-evaluate#eval_table_name
query_statement: Query statement string used to generate the evaluation data, as in ML.EVALUATE(MODEL model_name[, {TABLE table_name | (query_statement)}]). For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-evaluate#eval_query_statement
threshold: A custom threshold for the binary-class classification model to be used for evaluation. The default value is 0.5. The threshold value that is supplied must be of type STRUCT. https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-evaluate#eval_threshold
query_parameters: jobs.query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other one.
project: Project to run BigQuery model evaluation job. Defaults to the project in which the PipelineJob is run.
Returns:
evaluation_metrics: Artifact describing the evaluation metrics returned by ML.EVALUATE for the supplied model.
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.evaluate_model.launcher',
],
args=[
'--type',
'BigqueryEvaluateModelJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--table_name',
table_name,
'--query_statement',
query_statement,
'--threshold',
threshold,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
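# --- Illustrative usage sketch (added for exposition; not part of the original
# file). Evaluates a binary classifier against a held-out table with a custom
# classification threshold; the evaluation_metrics output artifact captures the
# ML.EVALUATE result. All ids, the URI, and table names are placeholders.
from kfp import dsl


@dsl.pipeline(name='bq-evaluate-model-example')
def bq_evaluate_model_example_pipeline():
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_classifier',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_classifier',
      },
  )
  bigquery_evaluate_model_job(
      project='my-project',
      location='US',
      model=model_op.output,
      table_name='my-project.my_dataset.eval_data',
      threshold=0.55,
  )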
| 888 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/evaluate_model/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery Evaluate Model Component."""
| 889 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_centroids/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from kfp.dsl import Artifact
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_centroids_job(
model: Input[BQMLModel],
centroids: Output[Artifact],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
standardize: bool = False,
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery ML.CENTROIDS job and waits for it to finish.
Args:
location: Location to run the BigQuery ML.CENTROIDS job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for ML.CENTROIDS. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-centroids#mlcentroids_syntax
standardize: Determines whether the centroid features should be standardized to assume that all features have a mean of zero and a standard deviation of one. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-centroids#mlcentroids_syntax
query_parameters: jobs.query parameters for standard SQL queries. If query_parameters are specified both here and in job_configuration_query, the value here overrides the other one.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters, can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other one.
project: Project to run BigQuery ML.CENTROIDS job. Defaults to the project in which the PipelineJob is run.
Returns:
centroids: Information about the centroids in a k-means model. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-centroids#mlcentroids_output
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.ml_centroids.launcher',
],
args=[
'--type',
'BigqueryMLCentroidsJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--standardize',
standardize,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
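# --- Illustrative usage sketch (added for exposition; not part of the original
# file). Reads the centroids of a k-means model with standardized feature
# values; ML.CENTROIDS applies only to KMEANS models. All ids and the URI are
# placeholders, and dsl.importer is assumed as the way to wire in the model.
from kfp import dsl


@dsl.pipeline(name='bq-ml-centroids-example')
def bq_ml_centroids_example_pipeline():
  model_op = dsl.importer(
      artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_kmeans_model',
      artifact_class=BQMLModel,
      metadata={
          'projectId': 'my-project',
          'datasetId': 'my_dataset',
          'modelId': 'my_kmeans_model',
      },
  )
  bigquery_ml_centroids_job(
      project='my-project',
      location='US',
      model=model_op.output,
      standardize=True,
  )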
| 890 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_centroids/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery ML Centroids Component."""
| 891 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_reconstruction_loss/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from google_cloud_pipeline_components.types.artifact_types import BQTable
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_reconstruction_loss_job(
model: Input[BQMLModel],
destination_table: Output[BQTable],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
table_name: str = '',
query_statement: str = '',
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery ml reconstruction loss job and waits for it to finish.
Args:
location: Location to run the BigQuery ml reconstruction loss job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-reconstruction-loss#reconstruction_loss_model_name
table_name: BigQuery table id of the input table that contains the input data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-reconstruction-loss#reconstruction_loss_table_name
query_statement: Query statement string used to generate the input data. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-reconstruction-loss#reconstruction_loss_query_statement
      query_parameters: jobs.query parameters for standard SQL queries. If query_parameters is specified both here and in job_configuration_query, the value here overrides the other.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
      labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters and can only contain lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter, and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
      encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other.
project: Project to run BigQuery ml reconstruction loss job. Defaults to the project in which the PipelineJob is run.
Returns:
destination_table: Describes the table where the ml reconstruction loss job results should be stored. This property must be set for large results that exceed the maximum response size. For queries that produce anonymous (cached) results, this field will be populated by BigQuery.
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.ml_reconstruction_loss.launcher',
],
args=[
'--type',
'BigqueryMLReconstructionLossJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--table_name',
table_name,
'--query_statement',
query_statement,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
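

# A minimal, hypothetical usage sketch behind a __main__ guard, so importing
# this module is unaffected. It assumes kfp>=2; 'my-project', 'my_dataset',
# 'my_autoencoder', and the input table name are placeholders. Pipelines
# normally use the exported alias (e.g. BigqueryMLReconstructionLossJobOp)
# from google_cloud_pipeline_components.v1.bigquery.
if __name__ == '__main__':
  from kfp import compiler
  from kfp import dsl

  @dsl.pipeline(name='bq-ml-reconstruction-loss-example')
  def _example_pipeline(project: str, location: str = 'US'):
    # Import an existing BQML autoencoder (or PCA) model; the component reads
    # projectId/datasetId/modelId from the artifact metadata.
    model_op = dsl.importer(
        artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_autoencoder',
        artifact_class=BQMLModel,
        metadata={
            'projectId': 'my-project',
            'datasetId': 'my_dataset',
            'modelId': 'my_autoencoder',
        },
    )
    # Either table_name or query_statement should identify the input data;
    # table_name is used in this sketch.
    bigquery_ml_reconstruction_loss_job(
        project=project,
        location=location,
        model=model_op.output,
        table_name='my-project.my_dataset.my_input_table',
    )

  # Compiling only writes the pipeline spec; nothing runs in BigQuery here.
  compiler.Compiler().compile(
      _example_pipeline, 'bq_ml_reconstruction_loss_example.yaml'
  )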
| 892 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_reconstruction_loss/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery ML Reconstruction Loss Component."""
| 893 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/feature_importance/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from kfp.dsl import Artifact
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_feature_importance_job(
model: Input[BQMLModel],
feature_importance: Output[Artifact],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery feature importance fetching job and waits for it to
finish.
Args:
      location: Location to run the BigQuery feature importance job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for feature importance. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-predict#predict_model_name
      query_parameters: Query parameters for standard SQL queries. If query_parameters is specified both here and in job_configuration_query, the value here overrides the other.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
      labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters and can only contain lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter, and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
      encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other.
      project: Project to run the BigQuery feature importance job. Defaults to the project in which the PipelineJob is run.
Returns:
feature_importance: Describes common metrics applicable to the type of model supplied. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-importance
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.feature_importance.launcher',
],
args=[
'--type',
'BigqueryMLFeatureImportanceJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
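

# A minimal, hypothetical usage sketch behind a __main__ guard so importing
# this module stays side-effect free. It assumes kfp>=2 and uses placeholder
# IDs ('my-project', 'my_dataset', 'my_boosted_tree_model');
# ML.FEATURE_IMPORTANCE requires a model type that records feature importance
# (for example a boosted tree model). Pipelines normally use the exported
# alias (e.g. BigqueryMLFeatureImportanceJobOp) from
# google_cloud_pipeline_components.v1.bigquery.
if __name__ == '__main__':
  from kfp import compiler
  from kfp import dsl

  @dsl.pipeline(name='bq-ml-feature-importance-example')
  def _example_pipeline(project: str, location: str = 'US'):
    # Import an existing BQML model by its placeholder IDs; the component
    # reads projectId/datasetId/modelId from the artifact metadata.
    model_op = dsl.importer(
        artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_boosted_tree_model',
        artifact_class=BQMLModel,
        metadata={
            'projectId': 'my-project',
            'datasetId': 'my_dataset',
            'modelId': 'my_boosted_tree_model',
        },
    )
    bigquery_ml_feature_importance_job(
        project=project,
        location=location,
        model=model_op.output,
    )

  # Compiling only writes the pipeline spec; no BigQuery job is launched here.
  compiler.Compiler().compile(
      _example_pipeline, 'bq_ml_feature_importance_example.yaml'
  )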
| 894 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/feature_importance/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery Feature Importance Component."""
| 895 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/explain_forecast_model/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from google_cloud_pipeline_components.types.artifact_types import BQTable
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_explain_forecast_model_job(
model: Input[BQMLModel],
destination_table: Output[BQTable],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
horizon: int = 3,
confidence_level: float = 0.95,
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery ML.EXPLAIN_FORECAST job and let you explain forecast an
ARIMA_PLUS or ARIMA model.
This function only applies to the time-series ARIMA_PLUS and ARIMA models.
Args:
location: Location to run the BigQuery job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for ML.EXPLAIN_FORECAST. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-forecast
      horizon: The number of forecasted time points to explain. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-forecast#horizon
confidence_level: The percentage of the future values that fall in the prediction interval. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-forecast#confidence_level
      query_parameters: Query parameters for standard SQL queries. If query_parameters is specified both here and in job_configuration_query, the value here overrides the other.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
      labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters and can only contain lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter, and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
      encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other.
project: Project to run the BigQuery job. Defaults to the project in which the PipelineJob is run.
Returns:
destination_table: Describes the table where the model explain forecast results should be stored. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-forecast#mlexplain_forecast_output
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.explain_forecast_model.launcher',
],
args=[
'--type',
'BigqueryExplainForecastModelJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--horizon',
horizon,
'--confidence_level',
confidence_level,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
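

# A minimal, hypothetical usage sketch behind a __main__ guard; it assumes
# kfp>=2 and uses placeholder IDs ('my-project', 'my_dataset',
# 'my_arima_plus_model'). ML.EXPLAIN_FORECAST only applies to ARIMA_PLUS and
# ARIMA models, as noted in the docstring above. Pipelines normally use the
# exported alias (e.g. BigqueryExplainForecastModelJobOp) from
# google_cloud_pipeline_components.v1.bigquery.
if __name__ == '__main__':
  from kfp import compiler
  from kfp import dsl

  @dsl.pipeline(name='bq-explain-forecast-example')
  def _example_pipeline(project: str, location: str = 'US'):
    # Import an existing ARIMA_PLUS model; the component reads
    # projectId/datasetId/modelId from the artifact metadata.
    arima_model = dsl.importer(
        artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_arima_plus_model',
        artifact_class=BQMLModel,
        metadata={
            'projectId': 'my-project',
            'datasetId': 'my_dataset',
            'modelId': 'my_arima_plus_model',
        },
    )
    bigquery_explain_forecast_model_job(
        project=project,
        location=location,
        model=arima_model.output,
        horizon=30,
        confidence_level=0.9,
    )

  # Compiling only writes the pipeline spec; no BigQuery job is launched here.
  compiler.Compiler().compile(
      _example_pipeline, 'bq_explain_forecast_example.yaml'
  )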
| 896 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/explain_forecast_model/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery Explain Forecast Model Component."""
| 897 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_arima_evaluate/component.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from google_cloud_pipeline_components import _image
from google_cloud_pipeline_components import _placeholders
from google_cloud_pipeline_components.types.artifact_types import BQMLModel
from kfp.dsl import Artifact
from kfp.dsl import ConcatPlaceholder
from kfp.dsl import container_component
from kfp.dsl import ContainerSpec
from kfp.dsl import Input
from kfp.dsl import Output
from kfp.dsl import OutputPath
@container_component
def bigquery_ml_arima_evaluate_job(
model: Input[BQMLModel],
arima_evaluation_metrics: Output[Artifact],
gcp_resources: OutputPath(str),
location: str = 'us-central1',
show_all_candidate_models: bool = False,
query_parameters: List[str] = [],
job_configuration_query: Dict[str, str] = {},
labels: Dict[str, str] = {},
encryption_spec_key_name: str = '',
project: str = _placeholders.PROJECT_ID_PLACEHOLDER,
):
# fmt: off
"""Launch a BigQuery ML.ARIMA_EVALUATE job and waits for it to finish.
Args:
location: Location to run the BigQuery model evaluation job. If not set, default to `US` multi-region. For more details, see https://cloud.google.com/bigquery/docs/locations#specifying_your_location
model: BigQuery ML model for ML.ARIMA_EVALUATE. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-arima-evaluate#model_name
      show_all_candidate_models: Whether to show evaluation metrics or an error message either for all candidate models or only for the best model with the lowest AIC. The value is of type BOOL and is part of the settings STRUCT. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-arima-evaluate#show_all_candidate_models
      query_parameters: jobs.query parameters for standard SQL queries. If query_parameters is specified both here and in job_configuration_query, the value here overrides the other.
job_configuration_query: A json formatted string describing the rest of the job configuration. For more details, see https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery
      labels: The labels associated with this job. You can use these to organize and group your jobs. Label keys and values can be no longer than 63 characters and can only contain lowercase letters, numeric characters, underscores, and dashes. International characters are allowed. Label values are optional. Label keys must start with a letter, and each label in the list must have a different key.
Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.
      encryption_spec_key_name: Describes the Cloud KMS encryption key that will be used to protect the destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. If encryption_spec_key_name is specified both here and in job_configuration_query, the value here overrides the other.
project: Project to run BigQuery model evaluation job. Defaults to the project in which the PipelineJob is run.
Returns:
      arima_evaluation_metrics: Describes the ARIMA evaluation metrics. For more details, see https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-arima-evaluate#mlarima_evaluate_output
gcp_resources: Serialized gcp_resources proto tracking the BigQuery job. For more details, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/proto/README.md.
"""
# fmt: on
return ContainerSpec(
image=_image.GCPC_IMAGE_TAG,
command=[
'python3',
'-u',
'-m',
'google_cloud_pipeline_components.container.v1.bigquery.ml_arima_evaluate.launcher',
],
args=[
'--type',
'BigqueryMLArimaEvaluateJob',
'--project',
project,
'--location',
location,
'--model_name',
ConcatPlaceholder([
model.metadata['projectId'],
'.',
model.metadata['datasetId'],
'.',
model.metadata['modelId'],
]),
'--show_all_candidate_models',
show_all_candidate_models,
'--payload',
ConcatPlaceholder([
'{',
'"configuration": {',
'"query": ',
job_configuration_query,
', "labels": ',
labels,
'}',
'}',
]),
'--job_configuration_query_override',
ConcatPlaceholder([
'{',
'"query_parameters": ',
query_parameters,
', "destination_encryption_configuration": {',
'"kmsKeyName": "',
encryption_spec_key_name,
'"}',
'}',
]),
'--gcp_resources',
gcp_resources,
'--executor_input',
'{{$}}',
],
)
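

# A minimal, hypothetical usage sketch behind a __main__ guard; it assumes
# kfp>=2 and uses placeholder IDs ('my-project', 'my_dataset',
# 'my_arima_plus_model'). Pipelines normally use the exported alias
# (e.g. BigqueryMLArimaEvaluateJobOp) from
# google_cloud_pipeline_components.v1.bigquery.
if __name__ == '__main__':
  from kfp import compiler
  from kfp import dsl

  @dsl.pipeline(name='bq-ml-arima-evaluate-example')
  def _example_pipeline(project: str, location: str = 'US'):
    # Import an existing ARIMA_PLUS model; the component reads
    # projectId/datasetId/modelId from the artifact metadata.
    arima_model = dsl.importer(
        artifact_uri='https://www.googleapis.com/bigquery/v2/projects/my-project/datasets/my_dataset/models/my_arima_plus_model',
        artifact_class=BQMLModel,
        metadata={
            'projectId': 'my-project',
            'datasetId': 'my_dataset',
            'modelId': 'my_arima_plus_model',
        },
    )
    # Request metrics for every candidate model, not just the lowest-AIC one.
    bigquery_ml_arima_evaluate_job(
        project=project,
        location=location,
        model=arima_model.output,
        show_all_candidate_models=True,
    )

  # Compiling only writes the pipeline spec; no BigQuery job is launched here.
  compiler.Compiler().compile(
      _example_pipeline, 'bq_ml_arima_evaluate_example.yaml'
  )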
| 898 |
0 | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery | kubeflow_public_repos/pipelines/components/google-cloud/google_cloud_pipeline_components/v1/bigquery/ml_arima_evaluate/__init__.py | # Copyright 2023 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline V2 BigQuery ML Arima Evaluate Component."""
| 899 |