|
program(1.3) |
|
[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})] |
|
{ |
|
func main<ios18>(tensor<fp16, [1, ?, 1024]> audio_data, state<tensor<fp16, [24, 1, 448, 1024]>> k_cache1, state<tensor<fp16, [24, 1, 1500, 1024]>> k_cache2, state<tensor<fp16, [24, 1, 448, 1024]>> v_cache1, state<tensor<fp16, [24, 1, 1500, 1024]>> v_cache2) [FlexibleShapeInformation = tuple<tuple<string, dict<string, tensor<int32, [?]>>>, tuple<string, dict<string, list<tensor<int32, [2]>, ?>>>>((("DefaultShapes", {{"audio_data", [1, 1, 1024]}}), ("RangeDims", {{"audio_data", [[1, 1], [1, 1500], [1024, 1024]]}})))] { |
|
tensor<fp16, [1, ?, 1024]> dummy = identity(x = audio_data)[name = string("identity_0")]; |
|
tensor<fp16, [24, 1, 448, 1024]> read_state_0 = read_state(input = k_cache1)[name = string("read_state_0")]; |
|
tensor<int32, [4]> concat_0 = const()[name = string("concat_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> concat_1 = const()[name = string("concat_1"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> k_cache1_internal_tensor_assign_1_stride_0 = const()[name = string("k_cache1_internal_tensor_assign_1_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache1_internal_tensor_assign_1_begin_mask_0 = const()[name = string("k_cache1_internal_tensor_assign_1_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache1_internal_tensor_assign_1_end_mask_0 = const()[name = string("k_cache1_internal_tensor_assign_1_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])]; |
|
tensor<bool, [4]> k_cache1_internal_tensor_assign_1_squeeze_mask_0 = const()[name = string("k_cache1_internal_tensor_assign_1_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<fp16, [24, 1, 448, 1024]> const_0_to_fp16 = const()[name = string("const_0_to_fp16"), val = tensor<fp16, [24, 1, 448, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))]; |
|
tensor<fp16, [24, 1, 448, 1024]> k_cache1_internal_tensor_assign_1_cast_fp16 = slice_update(begin = concat_0, begin_mask = k_cache1_internal_tensor_assign_1_begin_mask_0, end = concat_1, end_mask = k_cache1_internal_tensor_assign_1_end_mask_0, squeeze_mask = k_cache1_internal_tensor_assign_1_squeeze_mask_0, stride = k_cache1_internal_tensor_assign_1_stride_0, update = const_0_to_fp16, x = read_state_0)[name = string("k_cache1_internal_tensor_assign_1_cast_fp16")]; |
|
write_state(data = k_cache1_internal_tensor_assign_1_cast_fp16, input = k_cache1)[name = string("coreml_update_state_50_write_state")]; |
|
tensor<fp16, [24, 1, 448, 1024]> read_state_1 = read_state(input = v_cache1)[name = string("read_state_1")]; |
|
tensor<int32, [4]> concat_2 = const()[name = string("concat_2"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> concat_3 = const()[name = string("concat_3"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> v_cache1_internal_tensor_assign_1_stride_0 = const()[name = string("v_cache1_internal_tensor_assign_1_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache1_internal_tensor_assign_1_begin_mask_0 = const()[name = string("v_cache1_internal_tensor_assign_1_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache1_internal_tensor_assign_1_end_mask_0 = const()[name = string("v_cache1_internal_tensor_assign_1_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])]; |
|
tensor<bool, [4]> v_cache1_internal_tensor_assign_1_squeeze_mask_0 = const()[name = string("v_cache1_internal_tensor_assign_1_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<fp16, [24, 1, 448, 1024]> v_cache1_internal_tensor_assign_1_cast_fp16 = slice_update(begin = concat_2, begin_mask = v_cache1_internal_tensor_assign_1_begin_mask_0, end = concat_3, end_mask = v_cache1_internal_tensor_assign_1_end_mask_0, squeeze_mask = v_cache1_internal_tensor_assign_1_squeeze_mask_0, stride = v_cache1_internal_tensor_assign_1_stride_0, update = const_0_to_fp16, x = read_state_1)[name = string("v_cache1_internal_tensor_assign_1_cast_fp16")]; |
|
write_state(data = v_cache1_internal_tensor_assign_1_cast_fp16, input = v_cache1)[name = string("coreml_update_state_51_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> read_state_2 = read_state(input = k_cache2)[name = string("read_state_2")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> read_state_3 = read_state(input = v_cache2)[name = string("read_state_3")]; |
|
tensor<fp16, [1024, 1024]> var_115_to_fp16 = const()[name = string("op_115_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22020224)))]; |
|
tensor<fp16, [1024]> linear_0_bias_0_to_fp16 = const()[name = string("linear_0_bias_0_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(24117440)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_0_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_115_to_fp16, x = audio_data)[name = string("linear_0_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_119_to_fp16 = const()[name = string("op_119_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(24119552)))]; |
|
tensor<fp16, [1024]> var_120_to_fp16 = const()[name = string("op_120_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(26216768)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_1_cast_fp16 = linear(bias = var_120_to_fp16, weight = var_119_to_fp16, x = audio_data)[name = string("linear_1_cast_fp16")]; |
|
tensor<int32, [3]> var_122_shape_cast_fp16 = shape(x = linear_0_cast_fp16)[name = string("op_122_shape_cast_fp16")]; |
|
int32 gather_0_axis_0 = const()[name = string("gather_0_axis_0"), val = int32(0)]; |
|
int32 gather_0_batch_dims_0 = const()[name = string("gather_0_batch_dims_0"), val = int32(0)]; |
|
bool gather_0_validate_indices_0 = const()[name = string("gather_0_validate_indices_0"), val = bool(false)]; |
|
string var_122_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_122_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_0_to_uint16 = const()[name = string("select_0_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_122_shape_cast_fp16_to_uint16 = cast(dtype = var_122_shape_cast_fp16_to_uint16_dtype_0, x = var_122_shape_cast_fp16)[name = string("cast_151")]; |
|
uint16 gather_0_cast_uint16 = gather(axis = gather_0_axis_0, batch_dims = gather_0_batch_dims_0, indices = select_0_to_uint16, validate_indices = gather_0_validate_indices_0, x = var_122_shape_cast_fp16_to_uint16)[name = string("gather_0_cast_uint16")]; |
|
string gather_0_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_0_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_11_axes_0 = const()[name = string("expand_dims_11_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_0_cast_uint16_to_int32 = cast(dtype = gather_0_cast_uint16_to_int32_dtype_0, x = gather_0_cast_uint16)[name = string("cast_150")]; |
|
tensor<int32, [1]> expand_dims_11 = expand_dims(axes = expand_dims_11_axes_0, x = gather_0_cast_uint16_to_int32)[name = string("expand_dims_11")]; |
|
tensor<int32, [4]> concat_5 = const()[name = string("concat_5"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_6_values0_0 = const()[name = string("concat_6_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_6_values1_0 = const()[name = string("concat_6_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_6_values3_0 = const()[name = string("concat_6_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_6_axis_0 = const()[name = string("concat_6_axis_0"), val = int32(0)]; |
|
bool concat_6_interleave_0 = const()[name = string("concat_6_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_6 = concat(axis = concat_6_axis_0, interleave = concat_6_interleave_0, values = (concat_6_values0_0, concat_6_values1_0, expand_dims_11, concat_6_values3_0))[name = string("concat_6")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_1_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_1_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_1_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_1_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_1_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_1_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_1_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_1_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_1_cast_fp16 = slice_update(begin = concat_5, begin_mask = k_cache2_internal_tensor_assign_1_begin_mask_0, end = concat_6, end_mask = k_cache2_internal_tensor_assign_1_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_1_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_1_stride_0, update = linear_0_cast_fp16, x = read_state_2)[name = string("k_cache2_internal_tensor_assign_1_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_1_cast_fp16, input = k_cache2)[name = string("coreml_update_state_52_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_52 = read_state(input = k_cache2)[name = string("coreml_update_state_52")]; |
|
tensor<int32, [3]> var_127_shape_cast_fp16 = shape(x = linear_1_cast_fp16)[name = string("op_127_shape_cast_fp16")]; |
|
int32 gather_1_axis_0 = const()[name = string("gather_1_axis_0"), val = int32(0)]; |
|
int32 gather_1_batch_dims_0 = const()[name = string("gather_1_batch_dims_0"), val = int32(0)]; |
|
bool gather_1_validate_indices_0 = const()[name = string("gather_1_validate_indices_0"), val = bool(false)]; |
|
string var_127_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_127_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_1_to_uint16 = const()[name = string("select_1_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_127_shape_cast_fp16_to_uint16 = cast(dtype = var_127_shape_cast_fp16_to_uint16_dtype_0, x = var_127_shape_cast_fp16)[name = string("cast_149")]; |
|
uint16 gather_1_cast_uint16 = gather(axis = gather_1_axis_0, batch_dims = gather_1_batch_dims_0, indices = select_1_to_uint16, validate_indices = gather_1_validate_indices_0, x = var_127_shape_cast_fp16_to_uint16)[name = string("gather_1_cast_uint16")]; |
|
string gather_1_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_1_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_15_axes_0 = const()[name = string("expand_dims_15_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_1_cast_uint16_to_int32 = cast(dtype = gather_1_cast_uint16_to_int32_dtype_0, x = gather_1_cast_uint16)[name = string("cast_148")]; |
|
tensor<int32, [1]> expand_dims_15 = expand_dims(axes = expand_dims_15_axes_0, x = gather_1_cast_uint16_to_int32)[name = string("expand_dims_15")]; |
|
tensor<int32, [4]> concat_8 = const()[name = string("concat_8"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_9_values0_0 = const()[name = string("concat_9_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_9_values1_0 = const()[name = string("concat_9_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_9_values3_0 = const()[name = string("concat_9_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_9_axis_0 = const()[name = string("concat_9_axis_0"), val = int32(0)]; |
|
bool concat_9_interleave_0 = const()[name = string("concat_9_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_9 = concat(axis = concat_9_axis_0, interleave = concat_9_interleave_0, values = (concat_9_values0_0, concat_9_values1_0, expand_dims_15, concat_9_values3_0))[name = string("concat_9")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_1_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_1_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_1_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_1_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_1_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_1_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_1_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_1_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_1_cast_fp16 = slice_update(begin = concat_8, begin_mask = v_cache2_internal_tensor_assign_1_begin_mask_0, end = concat_9, end_mask = v_cache2_internal_tensor_assign_1_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_1_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_1_stride_0, update = linear_1_cast_fp16, x = read_state_3)[name = string("v_cache2_internal_tensor_assign_1_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_1_cast_fp16, input = v_cache2)[name = string("coreml_update_state_53_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_53 = read_state(input = v_cache2)[name = string("coreml_update_state_53")]; |
|
tensor<fp16, [1024, 1024]> var_149_to_fp16 = const()[name = string("op_149_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(26218880)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_2_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_149_to_fp16, x = audio_data)[name = string("linear_2_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_153_to_fp16 = const()[name = string("op_153_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(28316096)))]; |
|
tensor<fp16, [1024]> var_154_to_fp16 = const()[name = string("op_154_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(30413312)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_3_cast_fp16 = linear(bias = var_154_to_fp16, weight = var_153_to_fp16, x = audio_data)[name = string("linear_3_cast_fp16")]; |
|
tensor<int32, [3]> var_156_shape_cast_fp16 = shape(x = linear_2_cast_fp16)[name = string("op_156_shape_cast_fp16")]; |
|
int32 gather_2_axis_0 = const()[name = string("gather_2_axis_0"), val = int32(0)]; |
|
int32 gather_2_batch_dims_0 = const()[name = string("gather_2_batch_dims_0"), val = int32(0)]; |
|
bool gather_2_validate_indices_0 = const()[name = string("gather_2_validate_indices_0"), val = bool(false)]; |
|
string var_156_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_156_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_2_to_uint16 = const()[name = string("select_2_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_156_shape_cast_fp16_to_uint16 = cast(dtype = var_156_shape_cast_fp16_to_uint16_dtype_0, x = var_156_shape_cast_fp16)[name = string("cast_147")]; |
|
uint16 gather_2_cast_uint16 = gather(axis = gather_2_axis_0, batch_dims = gather_2_batch_dims_0, indices = select_2_to_uint16, validate_indices = gather_2_validate_indices_0, x = var_156_shape_cast_fp16_to_uint16)[name = string("gather_2_cast_uint16")]; |
|
string gather_2_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_2_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_19_axes_0 = const()[name = string("expand_dims_19_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_2_cast_uint16_to_int32 = cast(dtype = gather_2_cast_uint16_to_int32_dtype_0, x = gather_2_cast_uint16)[name = string("cast_146")]; |
|
tensor<int32, [1]> expand_dims_19 = expand_dims(axes = expand_dims_19_axes_0, x = gather_2_cast_uint16_to_int32)[name = string("expand_dims_19")]; |
|
tensor<int32, [4]> concat_11 = const()[name = string("concat_11"), val = tensor<int32, [4]>([1, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_12_values0_0 = const()[name = string("concat_12_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_12_values1_0 = const()[name = string("concat_12_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_12_values3_0 = const()[name = string("concat_12_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_12_axis_0 = const()[name = string("concat_12_axis_0"), val = int32(0)]; |
|
bool concat_12_interleave_0 = const()[name = string("concat_12_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_12 = concat(axis = concat_12_axis_0, interleave = concat_12_interleave_0, values = (concat_12_values0_0, concat_12_values1_0, expand_dims_19, concat_12_values3_0))[name = string("concat_12")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_2_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_2_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_2_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_2_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_2_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_2_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_2_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_2_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_2_cast_fp16 = slice_update(begin = concat_11, begin_mask = k_cache2_internal_tensor_assign_2_begin_mask_0, end = concat_12, end_mask = k_cache2_internal_tensor_assign_2_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_2_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_2_stride_0, update = linear_2_cast_fp16, x = coreml_update_state_52)[name = string("k_cache2_internal_tensor_assign_2_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_2_cast_fp16, input = k_cache2)[name = string("coreml_update_state_54_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_54 = read_state(input = k_cache2)[name = string("coreml_update_state_54")]; |
|
tensor<int32, [3]> var_161_shape_cast_fp16 = shape(x = linear_3_cast_fp16)[name = string("op_161_shape_cast_fp16")]; |
|
int32 gather_3_axis_0 = const()[name = string("gather_3_axis_0"), val = int32(0)]; |
|
int32 gather_3_batch_dims_0 = const()[name = string("gather_3_batch_dims_0"), val = int32(0)]; |
|
bool gather_3_validate_indices_0 = const()[name = string("gather_3_validate_indices_0"), val = bool(false)]; |
|
string var_161_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_161_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_3_to_uint16 = const()[name = string("select_3_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_161_shape_cast_fp16_to_uint16 = cast(dtype = var_161_shape_cast_fp16_to_uint16_dtype_0, x = var_161_shape_cast_fp16)[name = string("cast_145")]; |
|
uint16 gather_3_cast_uint16 = gather(axis = gather_3_axis_0, batch_dims = gather_3_batch_dims_0, indices = select_3_to_uint16, validate_indices = gather_3_validate_indices_0, x = var_161_shape_cast_fp16_to_uint16)[name = string("gather_3_cast_uint16")]; |
|
string gather_3_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_3_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_23_axes_0 = const()[name = string("expand_dims_23_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_3_cast_uint16_to_int32 = cast(dtype = gather_3_cast_uint16_to_int32_dtype_0, x = gather_3_cast_uint16)[name = string("cast_144")]; |
|
tensor<int32, [1]> expand_dims_23 = expand_dims(axes = expand_dims_23_axes_0, x = gather_3_cast_uint16_to_int32)[name = string("expand_dims_23")]; |
|
tensor<int32, [4]> concat_14 = const()[name = string("concat_14"), val = tensor<int32, [4]>([1, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_15_values0_0 = const()[name = string("concat_15_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_15_values1_0 = const()[name = string("concat_15_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_15_values3_0 = const()[name = string("concat_15_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_15_axis_0 = const()[name = string("concat_15_axis_0"), val = int32(0)]; |
|
bool concat_15_interleave_0 = const()[name = string("concat_15_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_15 = concat(axis = concat_15_axis_0, interleave = concat_15_interleave_0, values = (concat_15_values0_0, concat_15_values1_0, expand_dims_23, concat_15_values3_0))[name = string("concat_15")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_2_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_2_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_2_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_2_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_2_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_2_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_2_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_2_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_2_cast_fp16 = slice_update(begin = concat_14, begin_mask = v_cache2_internal_tensor_assign_2_begin_mask_0, end = concat_15, end_mask = v_cache2_internal_tensor_assign_2_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_2_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_2_stride_0, update = linear_3_cast_fp16, x = coreml_update_state_53)[name = string("v_cache2_internal_tensor_assign_2_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_2_cast_fp16, input = v_cache2)[name = string("coreml_update_state_55_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_55 = read_state(input = v_cache2)[name = string("coreml_update_state_55")]; |
|
tensor<fp16, [1024, 1024]> var_183_to_fp16 = const()[name = string("op_183_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(30415424)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_4_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_183_to_fp16, x = audio_data)[name = string("linear_4_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_187_to_fp16 = const()[name = string("op_187_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(32512640)))]; |
|
tensor<fp16, [1024]> var_188_to_fp16 = const()[name = string("op_188_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(34609856)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_5_cast_fp16 = linear(bias = var_188_to_fp16, weight = var_187_to_fp16, x = audio_data)[name = string("linear_5_cast_fp16")]; |
|
tensor<int32, [3]> var_190_shape_cast_fp16 = shape(x = linear_4_cast_fp16)[name = string("op_190_shape_cast_fp16")]; |
|
int32 gather_4_axis_0 = const()[name = string("gather_4_axis_0"), val = int32(0)]; |
|
int32 gather_4_batch_dims_0 = const()[name = string("gather_4_batch_dims_0"), val = int32(0)]; |
|
bool gather_4_validate_indices_0 = const()[name = string("gather_4_validate_indices_0"), val = bool(false)]; |
|
string var_190_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_190_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_4_to_uint16 = const()[name = string("select_4_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_190_shape_cast_fp16_to_uint16 = cast(dtype = var_190_shape_cast_fp16_to_uint16_dtype_0, x = var_190_shape_cast_fp16)[name = string("cast_143")]; |
|
uint16 gather_4_cast_uint16 = gather(axis = gather_4_axis_0, batch_dims = gather_4_batch_dims_0, indices = select_4_to_uint16, validate_indices = gather_4_validate_indices_0, x = var_190_shape_cast_fp16_to_uint16)[name = string("gather_4_cast_uint16")]; |
|
string gather_4_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_4_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_27_axes_0 = const()[name = string("expand_dims_27_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_4_cast_uint16_to_int32 = cast(dtype = gather_4_cast_uint16_to_int32_dtype_0, x = gather_4_cast_uint16)[name = string("cast_142")]; |
|
tensor<int32, [1]> expand_dims_27 = expand_dims(axes = expand_dims_27_axes_0, x = gather_4_cast_uint16_to_int32)[name = string("expand_dims_27")]; |
|
tensor<int32, [4]> concat_17 = const()[name = string("concat_17"), val = tensor<int32, [4]>([2, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_18_values0_0 = const()[name = string("concat_18_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_18_values1_0 = const()[name = string("concat_18_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_18_values3_0 = const()[name = string("concat_18_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_18_axis_0 = const()[name = string("concat_18_axis_0"), val = int32(0)]; |
|
bool concat_18_interleave_0 = const()[name = string("concat_18_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_18 = concat(axis = concat_18_axis_0, interleave = concat_18_interleave_0, values = (concat_18_values0_0, concat_18_values1_0, expand_dims_27, concat_18_values3_0))[name = string("concat_18")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_3_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_3_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_3_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_3_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_3_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_3_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_3_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_3_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_3_cast_fp16 = slice_update(begin = concat_17, begin_mask = k_cache2_internal_tensor_assign_3_begin_mask_0, end = concat_18, end_mask = k_cache2_internal_tensor_assign_3_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_3_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_3_stride_0, update = linear_4_cast_fp16, x = coreml_update_state_54)[name = string("k_cache2_internal_tensor_assign_3_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_3_cast_fp16, input = k_cache2)[name = string("coreml_update_state_56_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_56 = read_state(input = k_cache2)[name = string("coreml_update_state_56")]; |
|
tensor<int32, [3]> var_195_shape_cast_fp16 = shape(x = linear_5_cast_fp16)[name = string("op_195_shape_cast_fp16")]; |
|
int32 gather_5_axis_0 = const()[name = string("gather_5_axis_0"), val = int32(0)]; |
|
int32 gather_5_batch_dims_0 = const()[name = string("gather_5_batch_dims_0"), val = int32(0)]; |
|
bool gather_5_validate_indices_0 = const()[name = string("gather_5_validate_indices_0"), val = bool(false)]; |
|
string var_195_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_195_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_5_to_uint16 = const()[name = string("select_5_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_195_shape_cast_fp16_to_uint16 = cast(dtype = var_195_shape_cast_fp16_to_uint16_dtype_0, x = var_195_shape_cast_fp16)[name = string("cast_141")]; |
|
uint16 gather_5_cast_uint16 = gather(axis = gather_5_axis_0, batch_dims = gather_5_batch_dims_0, indices = select_5_to_uint16, validate_indices = gather_5_validate_indices_0, x = var_195_shape_cast_fp16_to_uint16)[name = string("gather_5_cast_uint16")]; |
|
string gather_5_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_5_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_31_axes_0 = const()[name = string("expand_dims_31_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_5_cast_uint16_to_int32 = cast(dtype = gather_5_cast_uint16_to_int32_dtype_0, x = gather_5_cast_uint16)[name = string("cast_140")]; |
|
tensor<int32, [1]> expand_dims_31 = expand_dims(axes = expand_dims_31_axes_0, x = gather_5_cast_uint16_to_int32)[name = string("expand_dims_31")]; |
|
tensor<int32, [4]> concat_20 = const()[name = string("concat_20"), val = tensor<int32, [4]>([2, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_21_values0_0 = const()[name = string("concat_21_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_21_values1_0 = const()[name = string("concat_21_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_21_values3_0 = const()[name = string("concat_21_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_21_axis_0 = const()[name = string("concat_21_axis_0"), val = int32(0)]; |
|
bool concat_21_interleave_0 = const()[name = string("concat_21_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_21 = concat(axis = concat_21_axis_0, interleave = concat_21_interleave_0, values = (concat_21_values0_0, concat_21_values1_0, expand_dims_31, concat_21_values3_0))[name = string("concat_21")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_3_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_3_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_3_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_3_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_3_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_3_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_3_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_3_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_3_cast_fp16 = slice_update(begin = concat_20, begin_mask = v_cache2_internal_tensor_assign_3_begin_mask_0, end = concat_21, end_mask = v_cache2_internal_tensor_assign_3_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_3_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_3_stride_0, update = linear_5_cast_fp16, x = coreml_update_state_55)[name = string("v_cache2_internal_tensor_assign_3_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_3_cast_fp16, input = v_cache2)[name = string("coreml_update_state_57_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_57 = read_state(input = v_cache2)[name = string("coreml_update_state_57")]; |
|
tensor<fp16, [1024, 1024]> var_217_to_fp16 = const()[name = string("op_217_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(34611968)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_6_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_217_to_fp16, x = audio_data)[name = string("linear_6_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_221_to_fp16 = const()[name = string("op_221_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(36709184)))]; |
|
tensor<fp16, [1024]> var_222_to_fp16 = const()[name = string("op_222_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(38806400)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_7_cast_fp16 = linear(bias = var_222_to_fp16, weight = var_221_to_fp16, x = audio_data)[name = string("linear_7_cast_fp16")]; |
|
tensor<int32, [3]> var_224_shape_cast_fp16 = shape(x = linear_6_cast_fp16)[name = string("op_224_shape_cast_fp16")]; |
|
int32 gather_6_axis_0 = const()[name = string("gather_6_axis_0"), val = int32(0)]; |
|
int32 gather_6_batch_dims_0 = const()[name = string("gather_6_batch_dims_0"), val = int32(0)]; |
|
bool gather_6_validate_indices_0 = const()[name = string("gather_6_validate_indices_0"), val = bool(false)]; |
|
string var_224_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_224_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_6_to_uint16 = const()[name = string("select_6_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_224_shape_cast_fp16_to_uint16 = cast(dtype = var_224_shape_cast_fp16_to_uint16_dtype_0, x = var_224_shape_cast_fp16)[name = string("cast_139")]; |
|
uint16 gather_6_cast_uint16 = gather(axis = gather_6_axis_0, batch_dims = gather_6_batch_dims_0, indices = select_6_to_uint16, validate_indices = gather_6_validate_indices_0, x = var_224_shape_cast_fp16_to_uint16)[name = string("gather_6_cast_uint16")]; |
|
string gather_6_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_6_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_35_axes_0 = const()[name = string("expand_dims_35_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_6_cast_uint16_to_int32 = cast(dtype = gather_6_cast_uint16_to_int32_dtype_0, x = gather_6_cast_uint16)[name = string("cast_138")]; |
|
tensor<int32, [1]> expand_dims_35 = expand_dims(axes = expand_dims_35_axes_0, x = gather_6_cast_uint16_to_int32)[name = string("expand_dims_35")]; |
|
tensor<int32, [4]> concat_23 = const()[name = string("concat_23"), val = tensor<int32, [4]>([3, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_24_values0_0 = const()[name = string("concat_24_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_24_values1_0 = const()[name = string("concat_24_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_24_values3_0 = const()[name = string("concat_24_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_24_axis_0 = const()[name = string("concat_24_axis_0"), val = int32(0)]; |
|
bool concat_24_interleave_0 = const()[name = string("concat_24_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_24 = concat(axis = concat_24_axis_0, interleave = concat_24_interleave_0, values = (concat_24_values0_0, concat_24_values1_0, expand_dims_35, concat_24_values3_0))[name = string("concat_24")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_4_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_4_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_4_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_4_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_4_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_4_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_4_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_4_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_4_cast_fp16 = slice_update(begin = concat_23, begin_mask = k_cache2_internal_tensor_assign_4_begin_mask_0, end = concat_24, end_mask = k_cache2_internal_tensor_assign_4_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_4_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_4_stride_0, update = linear_6_cast_fp16, x = coreml_update_state_56)[name = string("k_cache2_internal_tensor_assign_4_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_4_cast_fp16, input = k_cache2)[name = string("coreml_update_state_58_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_58 = read_state(input = k_cache2)[name = string("coreml_update_state_58")]; |
|
tensor<int32, [3]> var_229_shape_cast_fp16 = shape(x = linear_7_cast_fp16)[name = string("op_229_shape_cast_fp16")]; |
|
int32 gather_7_axis_0 = const()[name = string("gather_7_axis_0"), val = int32(0)]; |
|
int32 gather_7_batch_dims_0 = const()[name = string("gather_7_batch_dims_0"), val = int32(0)]; |
|
bool gather_7_validate_indices_0 = const()[name = string("gather_7_validate_indices_0"), val = bool(false)]; |
|
string var_229_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_229_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_7_to_uint16 = const()[name = string("select_7_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_229_shape_cast_fp16_to_uint16 = cast(dtype = var_229_shape_cast_fp16_to_uint16_dtype_0, x = var_229_shape_cast_fp16)[name = string("cast_137")]; |
|
uint16 gather_7_cast_uint16 = gather(axis = gather_7_axis_0, batch_dims = gather_7_batch_dims_0, indices = select_7_to_uint16, validate_indices = gather_7_validate_indices_0, x = var_229_shape_cast_fp16_to_uint16)[name = string("gather_7_cast_uint16")]; |
|
string gather_7_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_7_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_39_axes_0 = const()[name = string("expand_dims_39_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_7_cast_uint16_to_int32 = cast(dtype = gather_7_cast_uint16_to_int32_dtype_0, x = gather_7_cast_uint16)[name = string("cast_136")]; |
|
tensor<int32, [1]> expand_dims_39 = expand_dims(axes = expand_dims_39_axes_0, x = gather_7_cast_uint16_to_int32)[name = string("expand_dims_39")]; |
|
tensor<int32, [4]> concat_26 = const()[name = string("concat_26"), val = tensor<int32, [4]>([3, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_27_values0_0 = const()[name = string("concat_27_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_27_values1_0 = const()[name = string("concat_27_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_27_values3_0 = const()[name = string("concat_27_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_27_axis_0 = const()[name = string("concat_27_axis_0"), val = int32(0)]; |
|
bool concat_27_interleave_0 = const()[name = string("concat_27_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_27 = concat(axis = concat_27_axis_0, interleave = concat_27_interleave_0, values = (concat_27_values0_0, concat_27_values1_0, expand_dims_39, concat_27_values3_0))[name = string("concat_27")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_4_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_4_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_4_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_4_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_4_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_4_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_4_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_4_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_4_cast_fp16 = slice_update(begin = concat_26, begin_mask = v_cache2_internal_tensor_assign_4_begin_mask_0, end = concat_27, end_mask = v_cache2_internal_tensor_assign_4_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_4_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_4_stride_0, update = linear_7_cast_fp16, x = coreml_update_state_57)[name = string("v_cache2_internal_tensor_assign_4_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_4_cast_fp16, input = v_cache2)[name = string("coreml_update_state_59_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_59 = read_state(input = v_cache2)[name = string("coreml_update_state_59")]; |
|
tensor<fp16, [1024, 1024]> var_251_to_fp16 = const()[name = string("op_251_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(38808512)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_8_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_251_to_fp16, x = audio_data)[name = string("linear_8_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_255_to_fp16 = const()[name = string("op_255_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40905728)))]; |
|
tensor<fp16, [1024]> var_256_to_fp16 = const()[name = string("op_256_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(43002944)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_9_cast_fp16 = linear(bias = var_256_to_fp16, weight = var_255_to_fp16, x = audio_data)[name = string("linear_9_cast_fp16")]; |
|
tensor<int32, [3]> var_258_shape_cast_fp16 = shape(x = linear_8_cast_fp16)[name = string("op_258_shape_cast_fp16")]; |
|
int32 gather_8_axis_0 = const()[name = string("gather_8_axis_0"), val = int32(0)]; |
|
int32 gather_8_batch_dims_0 = const()[name = string("gather_8_batch_dims_0"), val = int32(0)]; |
|
bool gather_8_validate_indices_0 = const()[name = string("gather_8_validate_indices_0"), val = bool(false)]; |
|
string var_258_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_258_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_8_to_uint16 = const()[name = string("select_8_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_258_shape_cast_fp16_to_uint16 = cast(dtype = var_258_shape_cast_fp16_to_uint16_dtype_0, x = var_258_shape_cast_fp16)[name = string("cast_135")]; |
|
uint16 gather_8_cast_uint16 = gather(axis = gather_8_axis_0, batch_dims = gather_8_batch_dims_0, indices = select_8_to_uint16, validate_indices = gather_8_validate_indices_0, x = var_258_shape_cast_fp16_to_uint16)[name = string("gather_8_cast_uint16")]; |
|
string gather_8_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_8_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_43_axes_0 = const()[name = string("expand_dims_43_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_8_cast_uint16_to_int32 = cast(dtype = gather_8_cast_uint16_to_int32_dtype_0, x = gather_8_cast_uint16)[name = string("cast_134")]; |
|
tensor<int32, [1]> expand_dims_43 = expand_dims(axes = expand_dims_43_axes_0, x = gather_8_cast_uint16_to_int32)[name = string("expand_dims_43")]; |
|
tensor<int32, [4]> concat_29 = const()[name = string("concat_29"), val = tensor<int32, [4]>([4, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_30_values0_0 = const()[name = string("concat_30_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_30_values1_0 = const()[name = string("concat_30_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_30_values3_0 = const()[name = string("concat_30_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_30_axis_0 = const()[name = string("concat_30_axis_0"), val = int32(0)]; |
|
bool concat_30_interleave_0 = const()[name = string("concat_30_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_30 = concat(axis = concat_30_axis_0, interleave = concat_30_interleave_0, values = (concat_30_values0_0, concat_30_values1_0, expand_dims_43, concat_30_values3_0))[name = string("concat_30")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_5_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_5_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_5_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_5_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_5_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_5_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_5_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_5_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_5_cast_fp16 = slice_update(begin = concat_29, begin_mask = k_cache2_internal_tensor_assign_5_begin_mask_0, end = concat_30, end_mask = k_cache2_internal_tensor_assign_5_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_5_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_5_stride_0, update = linear_8_cast_fp16, x = coreml_update_state_58)[name = string("k_cache2_internal_tensor_assign_5_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_5_cast_fp16, input = k_cache2)[name = string("coreml_update_state_60_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_60 = read_state(input = k_cache2)[name = string("coreml_update_state_60")]; |
|
tensor<int32, [3]> var_263_shape_cast_fp16 = shape(x = linear_9_cast_fp16)[name = string("op_263_shape_cast_fp16")]; |
|
int32 gather_9_axis_0 = const()[name = string("gather_9_axis_0"), val = int32(0)]; |
|
int32 gather_9_batch_dims_0 = const()[name = string("gather_9_batch_dims_0"), val = int32(0)]; |
|
bool gather_9_validate_indices_0 = const()[name = string("gather_9_validate_indices_0"), val = bool(false)]; |
|
string var_263_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_263_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_9_to_uint16 = const()[name = string("select_9_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_263_shape_cast_fp16_to_uint16 = cast(dtype = var_263_shape_cast_fp16_to_uint16_dtype_0, x = var_263_shape_cast_fp16)[name = string("cast_133")]; |
|
uint16 gather_9_cast_uint16 = gather(axis = gather_9_axis_0, batch_dims = gather_9_batch_dims_0, indices = select_9_to_uint16, validate_indices = gather_9_validate_indices_0, x = var_263_shape_cast_fp16_to_uint16)[name = string("gather_9_cast_uint16")]; |
|
string gather_9_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_9_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_47_axes_0 = const()[name = string("expand_dims_47_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_9_cast_uint16_to_int32 = cast(dtype = gather_9_cast_uint16_to_int32_dtype_0, x = gather_9_cast_uint16)[name = string("cast_132")]; |
|
tensor<int32, [1]> expand_dims_47 = expand_dims(axes = expand_dims_47_axes_0, x = gather_9_cast_uint16_to_int32)[name = string("expand_dims_47")]; |
|
tensor<int32, [4]> concat_32 = const()[name = string("concat_32"), val = tensor<int32, [4]>([4, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_33_values0_0 = const()[name = string("concat_33_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_33_values1_0 = const()[name = string("concat_33_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_33_values3_0 = const()[name = string("concat_33_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_33_axis_0 = const()[name = string("concat_33_axis_0"), val = int32(0)]; |
|
bool concat_33_interleave_0 = const()[name = string("concat_33_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_33 = concat(axis = concat_33_axis_0, interleave = concat_33_interleave_0, values = (concat_33_values0_0, concat_33_values1_0, expand_dims_47, concat_33_values3_0))[name = string("concat_33")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_5_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_5_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_5_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_5_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_5_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_5_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_5_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_5_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_5_cast_fp16 = slice_update(begin = concat_32, begin_mask = v_cache2_internal_tensor_assign_5_begin_mask_0, end = concat_33, end_mask = v_cache2_internal_tensor_assign_5_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_5_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_5_stride_0, update = linear_9_cast_fp16, x = coreml_update_state_59)[name = string("v_cache2_internal_tensor_assign_5_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_5_cast_fp16, input = v_cache2)[name = string("coreml_update_state_61_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_61 = read_state(input = v_cache2)[name = string("coreml_update_state_61")]; |
|
tensor<fp16, [1024, 1024]> var_285_to_fp16 = const()[name = string("op_285_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(43005056)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_10_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_285_to_fp16, x = audio_data)[name = string("linear_10_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_289_to_fp16 = const()[name = string("op_289_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45102272)))]; |
|
tensor<fp16, [1024]> var_290_to_fp16 = const()[name = string("op_290_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(47199488)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_11_cast_fp16 = linear(bias = var_290_to_fp16, weight = var_289_to_fp16, x = audio_data)[name = string("linear_11_cast_fp16")]; |
|
tensor<int32, [3]> var_292_shape_cast_fp16 = shape(x = linear_10_cast_fp16)[name = string("op_292_shape_cast_fp16")]; |
|
int32 gather_10_axis_0 = const()[name = string("gather_10_axis_0"), val = int32(0)]; |
|
int32 gather_10_batch_dims_0 = const()[name = string("gather_10_batch_dims_0"), val = int32(0)]; |
|
bool gather_10_validate_indices_0 = const()[name = string("gather_10_validate_indices_0"), val = bool(false)]; |
|
string var_292_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_292_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_10_to_uint16 = const()[name = string("select_10_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_292_shape_cast_fp16_to_uint16 = cast(dtype = var_292_shape_cast_fp16_to_uint16_dtype_0, x = var_292_shape_cast_fp16)[name = string("cast_131")]; |
|
uint16 gather_10_cast_uint16 = gather(axis = gather_10_axis_0, batch_dims = gather_10_batch_dims_0, indices = select_10_to_uint16, validate_indices = gather_10_validate_indices_0, x = var_292_shape_cast_fp16_to_uint16)[name = string("gather_10_cast_uint16")]; |
|
string gather_10_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_10_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_51_axes_0 = const()[name = string("expand_dims_51_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_10_cast_uint16_to_int32 = cast(dtype = gather_10_cast_uint16_to_int32_dtype_0, x = gather_10_cast_uint16)[name = string("cast_130")]; |
|
tensor<int32, [1]> expand_dims_51 = expand_dims(axes = expand_dims_51_axes_0, x = gather_10_cast_uint16_to_int32)[name = string("expand_dims_51")]; |
|
tensor<int32, [4]> concat_35 = const()[name = string("concat_35"), val = tensor<int32, [4]>([5, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_36_values0_0 = const()[name = string("concat_36_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_36_values1_0 = const()[name = string("concat_36_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_36_values3_0 = const()[name = string("concat_36_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_36_axis_0 = const()[name = string("concat_36_axis_0"), val = int32(0)]; |
|
bool concat_36_interleave_0 = const()[name = string("concat_36_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_36 = concat(axis = concat_36_axis_0, interleave = concat_36_interleave_0, values = (concat_36_values0_0, concat_36_values1_0, expand_dims_51, concat_36_values3_0))[name = string("concat_36")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_6_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_6_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_6_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_6_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_6_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_6_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_6_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_6_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_6_cast_fp16 = slice_update(begin = concat_35, begin_mask = k_cache2_internal_tensor_assign_6_begin_mask_0, end = concat_36, end_mask = k_cache2_internal_tensor_assign_6_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_6_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_6_stride_0, update = linear_10_cast_fp16, x = coreml_update_state_60)[name = string("k_cache2_internal_tensor_assign_6_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_6_cast_fp16, input = k_cache2)[name = string("coreml_update_state_62_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_62 = read_state(input = k_cache2)[name = string("coreml_update_state_62")]; |
|
tensor<int32, [3]> var_297_shape_cast_fp16 = shape(x = linear_11_cast_fp16)[name = string("op_297_shape_cast_fp16")]; |
|
int32 gather_11_axis_0 = const()[name = string("gather_11_axis_0"), val = int32(0)]; |
|
int32 gather_11_batch_dims_0 = const()[name = string("gather_11_batch_dims_0"), val = int32(0)]; |
|
bool gather_11_validate_indices_0 = const()[name = string("gather_11_validate_indices_0"), val = bool(false)]; |
|
string var_297_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_297_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_11_to_uint16 = const()[name = string("select_11_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_297_shape_cast_fp16_to_uint16 = cast(dtype = var_297_shape_cast_fp16_to_uint16_dtype_0, x = var_297_shape_cast_fp16)[name = string("cast_129")]; |
|
uint16 gather_11_cast_uint16 = gather(axis = gather_11_axis_0, batch_dims = gather_11_batch_dims_0, indices = select_11_to_uint16, validate_indices = gather_11_validate_indices_0, x = var_297_shape_cast_fp16_to_uint16)[name = string("gather_11_cast_uint16")]; |
|
string gather_11_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_11_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_55_axes_0 = const()[name = string("expand_dims_55_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_11_cast_uint16_to_int32 = cast(dtype = gather_11_cast_uint16_to_int32_dtype_0, x = gather_11_cast_uint16)[name = string("cast_128")]; |
|
tensor<int32, [1]> expand_dims_55 = expand_dims(axes = expand_dims_55_axes_0, x = gather_11_cast_uint16_to_int32)[name = string("expand_dims_55")]; |
|
tensor<int32, [4]> concat_38 = const()[name = string("concat_38"), val = tensor<int32, [4]>([5, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_39_values0_0 = const()[name = string("concat_39_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_39_values1_0 = const()[name = string("concat_39_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_39_values3_0 = const()[name = string("concat_39_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_39_axis_0 = const()[name = string("concat_39_axis_0"), val = int32(0)]; |
|
bool concat_39_interleave_0 = const()[name = string("concat_39_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_39 = concat(axis = concat_39_axis_0, interleave = concat_39_interleave_0, values = (concat_39_values0_0, concat_39_values1_0, expand_dims_55, concat_39_values3_0))[name = string("concat_39")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_6_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_6_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_6_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_6_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_6_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_6_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_6_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_6_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_6_cast_fp16 = slice_update(begin = concat_38, begin_mask = v_cache2_internal_tensor_assign_6_begin_mask_0, end = concat_39, end_mask = v_cache2_internal_tensor_assign_6_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_6_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_6_stride_0, update = linear_11_cast_fp16, x = coreml_update_state_61)[name = string("v_cache2_internal_tensor_assign_6_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_6_cast_fp16, input = v_cache2)[name = string("coreml_update_state_63_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_63 = read_state(input = v_cache2)[name = string("coreml_update_state_63")]; |
|
tensor<fp16, [1024, 1024]> var_319_to_fp16 = const()[name = string("op_319_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(47201600)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_12_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_319_to_fp16, x = audio_data)[name = string("linear_12_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_323_to_fp16 = const()[name = string("op_323_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49298816)))]; |
|
tensor<fp16, [1024]> var_324_to_fp16 = const()[name = string("op_324_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(51396032)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_13_cast_fp16 = linear(bias = var_324_to_fp16, weight = var_323_to_fp16, x = audio_data)[name = string("linear_13_cast_fp16")]; |
|
tensor<int32, [3]> var_326_shape_cast_fp16 = shape(x = linear_12_cast_fp16)[name = string("op_326_shape_cast_fp16")]; |
|
int32 gather_12_axis_0 = const()[name = string("gather_12_axis_0"), val = int32(0)]; |
|
int32 gather_12_batch_dims_0 = const()[name = string("gather_12_batch_dims_0"), val = int32(0)]; |
|
bool gather_12_validate_indices_0 = const()[name = string("gather_12_validate_indices_0"), val = bool(false)]; |
|
string var_326_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_326_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_12_to_uint16 = const()[name = string("select_12_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_326_shape_cast_fp16_to_uint16 = cast(dtype = var_326_shape_cast_fp16_to_uint16_dtype_0, x = var_326_shape_cast_fp16)[name = string("cast_127")]; |
|
uint16 gather_12_cast_uint16 = gather(axis = gather_12_axis_0, batch_dims = gather_12_batch_dims_0, indices = select_12_to_uint16, validate_indices = gather_12_validate_indices_0, x = var_326_shape_cast_fp16_to_uint16)[name = string("gather_12_cast_uint16")]; |
|
string gather_12_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_12_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_59_axes_0 = const()[name = string("expand_dims_59_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_12_cast_uint16_to_int32 = cast(dtype = gather_12_cast_uint16_to_int32_dtype_0, x = gather_12_cast_uint16)[name = string("cast_126")]; |
|
tensor<int32, [1]> expand_dims_59 = expand_dims(axes = expand_dims_59_axes_0, x = gather_12_cast_uint16_to_int32)[name = string("expand_dims_59")]; |
|
tensor<int32, [4]> concat_41 = const()[name = string("concat_41"), val = tensor<int32, [4]>([6, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_42_values0_0 = const()[name = string("concat_42_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_42_values1_0 = const()[name = string("concat_42_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_42_values3_0 = const()[name = string("concat_42_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_42_axis_0 = const()[name = string("concat_42_axis_0"), val = int32(0)]; |
|
bool concat_42_interleave_0 = const()[name = string("concat_42_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_42 = concat(axis = concat_42_axis_0, interleave = concat_42_interleave_0, values = (concat_42_values0_0, concat_42_values1_0, expand_dims_59, concat_42_values3_0))[name = string("concat_42")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_7_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_7_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_7_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_7_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_7_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_7_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_7_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_7_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_7_cast_fp16 = slice_update(begin = concat_41, begin_mask = k_cache2_internal_tensor_assign_7_begin_mask_0, end = concat_42, end_mask = k_cache2_internal_tensor_assign_7_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_7_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_7_stride_0, update = linear_12_cast_fp16, x = coreml_update_state_62)[name = string("k_cache2_internal_tensor_assign_7_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_7_cast_fp16, input = k_cache2)[name = string("coreml_update_state_64_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_64 = read_state(input = k_cache2)[name = string("coreml_update_state_64")]; |
|
tensor<int32, [3]> var_331_shape_cast_fp16 = shape(x = linear_13_cast_fp16)[name = string("op_331_shape_cast_fp16")]; |
|
int32 gather_13_axis_0 = const()[name = string("gather_13_axis_0"), val = int32(0)]; |
|
int32 gather_13_batch_dims_0 = const()[name = string("gather_13_batch_dims_0"), val = int32(0)]; |
|
bool gather_13_validate_indices_0 = const()[name = string("gather_13_validate_indices_0"), val = bool(false)]; |
|
string var_331_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_331_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_13_to_uint16 = const()[name = string("select_13_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_331_shape_cast_fp16_to_uint16 = cast(dtype = var_331_shape_cast_fp16_to_uint16_dtype_0, x = var_331_shape_cast_fp16)[name = string("cast_125")]; |
|
uint16 gather_13_cast_uint16 = gather(axis = gather_13_axis_0, batch_dims = gather_13_batch_dims_0, indices = select_13_to_uint16, validate_indices = gather_13_validate_indices_0, x = var_331_shape_cast_fp16_to_uint16)[name = string("gather_13_cast_uint16")]; |
|
string gather_13_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_13_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_63_axes_0 = const()[name = string("expand_dims_63_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_13_cast_uint16_to_int32 = cast(dtype = gather_13_cast_uint16_to_int32_dtype_0, x = gather_13_cast_uint16)[name = string("cast_124")]; |
|
tensor<int32, [1]> expand_dims_63 = expand_dims(axes = expand_dims_63_axes_0, x = gather_13_cast_uint16_to_int32)[name = string("expand_dims_63")]; |
|
tensor<int32, [4]> concat_44 = const()[name = string("concat_44"), val = tensor<int32, [4]>([6, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_45_values0_0 = const()[name = string("concat_45_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_45_values1_0 = const()[name = string("concat_45_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_45_values3_0 = const()[name = string("concat_45_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_45_axis_0 = const()[name = string("concat_45_axis_0"), val = int32(0)]; |
|
bool concat_45_interleave_0 = const()[name = string("concat_45_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_45 = concat(axis = concat_45_axis_0, interleave = concat_45_interleave_0, values = (concat_45_values0_0, concat_45_values1_0, expand_dims_63, concat_45_values3_0))[name = string("concat_45")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_7_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_7_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_7_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_7_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_7_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_7_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_7_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_7_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_7_cast_fp16 = slice_update(begin = concat_44, begin_mask = v_cache2_internal_tensor_assign_7_begin_mask_0, end = concat_45, end_mask = v_cache2_internal_tensor_assign_7_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_7_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_7_stride_0, update = linear_13_cast_fp16, x = coreml_update_state_63)[name = string("v_cache2_internal_tensor_assign_7_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_7_cast_fp16, input = v_cache2)[name = string("coreml_update_state_65_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_65 = read_state(input = v_cache2)[name = string("coreml_update_state_65")]; |
|
tensor<fp16, [1024, 1024]> var_353_to_fp16 = const()[name = string("op_353_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(51398144)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_14_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_353_to_fp16, x = audio_data)[name = string("linear_14_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_357_to_fp16 = const()[name = string("op_357_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53495360)))]; |
|
tensor<fp16, [1024]> var_358_to_fp16 = const()[name = string("op_358_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55592576)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_15_cast_fp16 = linear(bias = var_358_to_fp16, weight = var_357_to_fp16, x = audio_data)[name = string("linear_15_cast_fp16")]; |
|
tensor<int32, [3]> var_360_shape_cast_fp16 = shape(x = linear_14_cast_fp16)[name = string("op_360_shape_cast_fp16")]; |
|
int32 gather_14_axis_0 = const()[name = string("gather_14_axis_0"), val = int32(0)]; |
|
int32 gather_14_batch_dims_0 = const()[name = string("gather_14_batch_dims_0"), val = int32(0)]; |
|
bool gather_14_validate_indices_0 = const()[name = string("gather_14_validate_indices_0"), val = bool(false)]; |
|
string var_360_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_360_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_14_to_uint16 = const()[name = string("select_14_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_360_shape_cast_fp16_to_uint16 = cast(dtype = var_360_shape_cast_fp16_to_uint16_dtype_0, x = var_360_shape_cast_fp16)[name = string("cast_123")]; |
|
uint16 gather_14_cast_uint16 = gather(axis = gather_14_axis_0, batch_dims = gather_14_batch_dims_0, indices = select_14_to_uint16, validate_indices = gather_14_validate_indices_0, x = var_360_shape_cast_fp16_to_uint16)[name = string("gather_14_cast_uint16")]; |
|
string gather_14_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_14_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_67_axes_0 = const()[name = string("expand_dims_67_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_14_cast_uint16_to_int32 = cast(dtype = gather_14_cast_uint16_to_int32_dtype_0, x = gather_14_cast_uint16)[name = string("cast_122")]; |
|
tensor<int32, [1]> expand_dims_67 = expand_dims(axes = expand_dims_67_axes_0, x = gather_14_cast_uint16_to_int32)[name = string("expand_dims_67")]; |
|
tensor<int32, [4]> concat_47 = const()[name = string("concat_47"), val = tensor<int32, [4]>([7, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_48_values0_0 = const()[name = string("concat_48_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_48_values1_0 = const()[name = string("concat_48_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_48_values3_0 = const()[name = string("concat_48_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_48_axis_0 = const()[name = string("concat_48_axis_0"), val = int32(0)]; |
|
bool concat_48_interleave_0 = const()[name = string("concat_48_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_48 = concat(axis = concat_48_axis_0, interleave = concat_48_interleave_0, values = (concat_48_values0_0, concat_48_values1_0, expand_dims_67, concat_48_values3_0))[name = string("concat_48")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_8_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_8_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_8_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_8_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_8_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_8_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_8_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_8_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_8_cast_fp16 = slice_update(begin = concat_47, begin_mask = k_cache2_internal_tensor_assign_8_begin_mask_0, end = concat_48, end_mask = k_cache2_internal_tensor_assign_8_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_8_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_8_stride_0, update = linear_14_cast_fp16, x = coreml_update_state_64)[name = string("k_cache2_internal_tensor_assign_8_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_8_cast_fp16, input = k_cache2)[name = string("coreml_update_state_66_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_66 = read_state(input = k_cache2)[name = string("coreml_update_state_66")]; |
|
tensor<int32, [3]> var_365_shape_cast_fp16 = shape(x = linear_15_cast_fp16)[name = string("op_365_shape_cast_fp16")]; |
|
int32 gather_15_axis_0 = const()[name = string("gather_15_axis_0"), val = int32(0)]; |
|
int32 gather_15_batch_dims_0 = const()[name = string("gather_15_batch_dims_0"), val = int32(0)]; |
|
bool gather_15_validate_indices_0 = const()[name = string("gather_15_validate_indices_0"), val = bool(false)]; |
|
string var_365_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_365_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_15_to_uint16 = const()[name = string("select_15_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_365_shape_cast_fp16_to_uint16 = cast(dtype = var_365_shape_cast_fp16_to_uint16_dtype_0, x = var_365_shape_cast_fp16)[name = string("cast_121")]; |
|
uint16 gather_15_cast_uint16 = gather(axis = gather_15_axis_0, batch_dims = gather_15_batch_dims_0, indices = select_15_to_uint16, validate_indices = gather_15_validate_indices_0, x = var_365_shape_cast_fp16_to_uint16)[name = string("gather_15_cast_uint16")]; |
|
string gather_15_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_15_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_71_axes_0 = const()[name = string("expand_dims_71_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_15_cast_uint16_to_int32 = cast(dtype = gather_15_cast_uint16_to_int32_dtype_0, x = gather_15_cast_uint16)[name = string("cast_120")]; |
|
tensor<int32, [1]> expand_dims_71 = expand_dims(axes = expand_dims_71_axes_0, x = gather_15_cast_uint16_to_int32)[name = string("expand_dims_71")]; |
|
tensor<int32, [4]> concat_50 = const()[name = string("concat_50"), val = tensor<int32, [4]>([7, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_51_values0_0 = const()[name = string("concat_51_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_51_values1_0 = const()[name = string("concat_51_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_51_values3_0 = const()[name = string("concat_51_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_51_axis_0 = const()[name = string("concat_51_axis_0"), val = int32(0)]; |
|
bool concat_51_interleave_0 = const()[name = string("concat_51_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_51 = concat(axis = concat_51_axis_0, interleave = concat_51_interleave_0, values = (concat_51_values0_0, concat_51_values1_0, expand_dims_71, concat_51_values3_0))[name = string("concat_51")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_8_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_8_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_8_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_8_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_8_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_8_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_8_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_8_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_8_cast_fp16 = slice_update(begin = concat_50, begin_mask = v_cache2_internal_tensor_assign_8_begin_mask_0, end = concat_51, end_mask = v_cache2_internal_tensor_assign_8_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_8_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_8_stride_0, update = linear_15_cast_fp16, x = coreml_update_state_65)[name = string("v_cache2_internal_tensor_assign_8_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_8_cast_fp16, input = v_cache2)[name = string("coreml_update_state_67_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_67 = read_state(input = v_cache2)[name = string("coreml_update_state_67")]; |
|
tensor<fp16, [1024, 1024]> var_387_to_fp16 = const()[name = string("op_387_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55594688)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_16_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_387_to_fp16, x = audio_data)[name = string("linear_16_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_391_to_fp16 = const()[name = string("op_391_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(57691904)))]; |
|
tensor<fp16, [1024]> var_392_to_fp16 = const()[name = string("op_392_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(59789120)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_17_cast_fp16 = linear(bias = var_392_to_fp16, weight = var_391_to_fp16, x = audio_data)[name = string("linear_17_cast_fp16")]; |
|
tensor<int32, [3]> var_394_shape_cast_fp16 = shape(x = linear_16_cast_fp16)[name = string("op_394_shape_cast_fp16")]; |
|
int32 gather_16_axis_0 = const()[name = string("gather_16_axis_0"), val = int32(0)]; |
|
int32 gather_16_batch_dims_0 = const()[name = string("gather_16_batch_dims_0"), val = int32(0)]; |
|
bool gather_16_validate_indices_0 = const()[name = string("gather_16_validate_indices_0"), val = bool(false)]; |
|
string var_394_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_394_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_16_to_uint16 = const()[name = string("select_16_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_394_shape_cast_fp16_to_uint16 = cast(dtype = var_394_shape_cast_fp16_to_uint16_dtype_0, x = var_394_shape_cast_fp16)[name = string("cast_119")]; |
|
uint16 gather_16_cast_uint16 = gather(axis = gather_16_axis_0, batch_dims = gather_16_batch_dims_0, indices = select_16_to_uint16, validate_indices = gather_16_validate_indices_0, x = var_394_shape_cast_fp16_to_uint16)[name = string("gather_16_cast_uint16")]; |
|
string gather_16_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_16_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_75_axes_0 = const()[name = string("expand_dims_75_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_16_cast_uint16_to_int32 = cast(dtype = gather_16_cast_uint16_to_int32_dtype_0, x = gather_16_cast_uint16)[name = string("cast_118")]; |
|
tensor<int32, [1]> expand_dims_75 = expand_dims(axes = expand_dims_75_axes_0, x = gather_16_cast_uint16_to_int32)[name = string("expand_dims_75")]; |
|
tensor<int32, [4]> concat_53 = const()[name = string("concat_53"), val = tensor<int32, [4]>([8, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_54_values0_0 = const()[name = string("concat_54_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_54_values1_0 = const()[name = string("concat_54_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_54_values3_0 = const()[name = string("concat_54_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_54_axis_0 = const()[name = string("concat_54_axis_0"), val = int32(0)]; |
|
bool concat_54_interleave_0 = const()[name = string("concat_54_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_54 = concat(axis = concat_54_axis_0, interleave = concat_54_interleave_0, values = (concat_54_values0_0, concat_54_values1_0, expand_dims_75, concat_54_values3_0))[name = string("concat_54")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_9_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_9_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_9_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_9_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_9_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_9_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_9_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_9_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_9_cast_fp16 = slice_update(begin = concat_53, begin_mask = k_cache2_internal_tensor_assign_9_begin_mask_0, end = concat_54, end_mask = k_cache2_internal_tensor_assign_9_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_9_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_9_stride_0, update = linear_16_cast_fp16, x = coreml_update_state_66)[name = string("k_cache2_internal_tensor_assign_9_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_9_cast_fp16, input = k_cache2)[name = string("coreml_update_state_68_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_68 = read_state(input = k_cache2)[name = string("coreml_update_state_68")]; |
|
tensor<int32, [3]> var_399_shape_cast_fp16 = shape(x = linear_17_cast_fp16)[name = string("op_399_shape_cast_fp16")]; |
|
int32 gather_17_axis_0 = const()[name = string("gather_17_axis_0"), val = int32(0)]; |
|
int32 gather_17_batch_dims_0 = const()[name = string("gather_17_batch_dims_0"), val = int32(0)]; |
|
bool gather_17_validate_indices_0 = const()[name = string("gather_17_validate_indices_0"), val = bool(false)]; |
|
string var_399_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_399_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_17_to_uint16 = const()[name = string("select_17_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_399_shape_cast_fp16_to_uint16 = cast(dtype = var_399_shape_cast_fp16_to_uint16_dtype_0, x = var_399_shape_cast_fp16)[name = string("cast_117")]; |
|
uint16 gather_17_cast_uint16 = gather(axis = gather_17_axis_0, batch_dims = gather_17_batch_dims_0, indices = select_17_to_uint16, validate_indices = gather_17_validate_indices_0, x = var_399_shape_cast_fp16_to_uint16)[name = string("gather_17_cast_uint16")]; |
|
string gather_17_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_17_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_79_axes_0 = const()[name = string("expand_dims_79_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_17_cast_uint16_to_int32 = cast(dtype = gather_17_cast_uint16_to_int32_dtype_0, x = gather_17_cast_uint16)[name = string("cast_116")]; |
|
tensor<int32, [1]> expand_dims_79 = expand_dims(axes = expand_dims_79_axes_0, x = gather_17_cast_uint16_to_int32)[name = string("expand_dims_79")]; |
|
tensor<int32, [4]> concat_56 = const()[name = string("concat_56"), val = tensor<int32, [4]>([8, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_57_values0_0 = const()[name = string("concat_57_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_57_values1_0 = const()[name = string("concat_57_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_57_values3_0 = const()[name = string("concat_57_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_57_axis_0 = const()[name = string("concat_57_axis_0"), val = int32(0)]; |
|
bool concat_57_interleave_0 = const()[name = string("concat_57_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_57 = concat(axis = concat_57_axis_0, interleave = concat_57_interleave_0, values = (concat_57_values0_0, concat_57_values1_0, expand_dims_79, concat_57_values3_0))[name = string("concat_57")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_9_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_9_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_9_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_9_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_9_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_9_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_9_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_9_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_9_cast_fp16 = slice_update(begin = concat_56, begin_mask = v_cache2_internal_tensor_assign_9_begin_mask_0, end = concat_57, end_mask = v_cache2_internal_tensor_assign_9_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_9_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_9_stride_0, update = linear_17_cast_fp16, x = coreml_update_state_67)[name = string("v_cache2_internal_tensor_assign_9_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_9_cast_fp16, input = v_cache2)[name = string("coreml_update_state_69_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_69 = read_state(input = v_cache2)[name = string("coreml_update_state_69")]; |
|
tensor<fp16, [1024, 1024]> var_421_to_fp16 = const()[name = string("op_421_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(59791232)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_18_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_421_to_fp16, x = audio_data)[name = string("linear_18_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_425_to_fp16 = const()[name = string("op_425_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(61888448)))]; |
|
tensor<fp16, [1024]> var_426_to_fp16 = const()[name = string("op_426_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(63985664)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_19_cast_fp16 = linear(bias = var_426_to_fp16, weight = var_425_to_fp16, x = audio_data)[name = string("linear_19_cast_fp16")]; |
|
tensor<int32, [3]> var_428_shape_cast_fp16 = shape(x = linear_18_cast_fp16)[name = string("op_428_shape_cast_fp16")]; |
|
int32 gather_18_axis_0 = const()[name = string("gather_18_axis_0"), val = int32(0)]; |
|
int32 gather_18_batch_dims_0 = const()[name = string("gather_18_batch_dims_0"), val = int32(0)]; |
|
bool gather_18_validate_indices_0 = const()[name = string("gather_18_validate_indices_0"), val = bool(false)]; |
|
string var_428_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_428_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_18_to_uint16 = const()[name = string("select_18_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_428_shape_cast_fp16_to_uint16 = cast(dtype = var_428_shape_cast_fp16_to_uint16_dtype_0, x = var_428_shape_cast_fp16)[name = string("cast_115")]; |
|
uint16 gather_18_cast_uint16 = gather(axis = gather_18_axis_0, batch_dims = gather_18_batch_dims_0, indices = select_18_to_uint16, validate_indices = gather_18_validate_indices_0, x = var_428_shape_cast_fp16_to_uint16)[name = string("gather_18_cast_uint16")]; |
|
string gather_18_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_18_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_83_axes_0 = const()[name = string("expand_dims_83_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_18_cast_uint16_to_int32 = cast(dtype = gather_18_cast_uint16_to_int32_dtype_0, x = gather_18_cast_uint16)[name = string("cast_114")]; |
|
tensor<int32, [1]> expand_dims_83 = expand_dims(axes = expand_dims_83_axes_0, x = gather_18_cast_uint16_to_int32)[name = string("expand_dims_83")]; |
|
tensor<int32, [4]> concat_59 = const()[name = string("concat_59"), val = tensor<int32, [4]>([9, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_60_values0_0 = const()[name = string("concat_60_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_60_values1_0 = const()[name = string("concat_60_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_60_values3_0 = const()[name = string("concat_60_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_60_axis_0 = const()[name = string("concat_60_axis_0"), val = int32(0)]; |
|
bool concat_60_interleave_0 = const()[name = string("concat_60_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_60 = concat(axis = concat_60_axis_0, interleave = concat_60_interleave_0, values = (concat_60_values0_0, concat_60_values1_0, expand_dims_83, concat_60_values3_0))[name = string("concat_60")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_10_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_10_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_10_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_10_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_10_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_10_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_10_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_10_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_10_cast_fp16 = slice_update(begin = concat_59, begin_mask = k_cache2_internal_tensor_assign_10_begin_mask_0, end = concat_60, end_mask = k_cache2_internal_tensor_assign_10_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_10_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_10_stride_0, update = linear_18_cast_fp16, x = coreml_update_state_68)[name = string("k_cache2_internal_tensor_assign_10_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_10_cast_fp16, input = k_cache2)[name = string("coreml_update_state_70_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_70 = read_state(input = k_cache2)[name = string("coreml_update_state_70")]; |
|
tensor<int32, [3]> var_433_shape_cast_fp16 = shape(x = linear_19_cast_fp16)[name = string("op_433_shape_cast_fp16")]; |
|
int32 gather_19_axis_0 = const()[name = string("gather_19_axis_0"), val = int32(0)]; |
|
int32 gather_19_batch_dims_0 = const()[name = string("gather_19_batch_dims_0"), val = int32(0)]; |
|
bool gather_19_validate_indices_0 = const()[name = string("gather_19_validate_indices_0"), val = bool(false)]; |
|
string var_433_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_433_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_19_to_uint16 = const()[name = string("select_19_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_433_shape_cast_fp16_to_uint16 = cast(dtype = var_433_shape_cast_fp16_to_uint16_dtype_0, x = var_433_shape_cast_fp16)[name = string("cast_113")]; |
|
uint16 gather_19_cast_uint16 = gather(axis = gather_19_axis_0, batch_dims = gather_19_batch_dims_0, indices = select_19_to_uint16, validate_indices = gather_19_validate_indices_0, x = var_433_shape_cast_fp16_to_uint16)[name = string("gather_19_cast_uint16")]; |
|
string gather_19_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_19_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_87_axes_0 = const()[name = string("expand_dims_87_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_19_cast_uint16_to_int32 = cast(dtype = gather_19_cast_uint16_to_int32_dtype_0, x = gather_19_cast_uint16)[name = string("cast_112")]; |
|
tensor<int32, [1]> expand_dims_87 = expand_dims(axes = expand_dims_87_axes_0, x = gather_19_cast_uint16_to_int32)[name = string("expand_dims_87")]; |
|
tensor<int32, [4]> concat_62 = const()[name = string("concat_62"), val = tensor<int32, [4]>([9, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_63_values0_0 = const()[name = string("concat_63_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_63_values1_0 = const()[name = string("concat_63_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_63_values3_0 = const()[name = string("concat_63_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_63_axis_0 = const()[name = string("concat_63_axis_0"), val = int32(0)]; |
|
bool concat_63_interleave_0 = const()[name = string("concat_63_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_63 = concat(axis = concat_63_axis_0, interleave = concat_63_interleave_0, values = (concat_63_values0_0, concat_63_values1_0, expand_dims_87, concat_63_values3_0))[name = string("concat_63")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_10_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_10_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_10_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_10_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_10_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_10_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_10_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_10_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_10_cast_fp16 = slice_update(begin = concat_62, begin_mask = v_cache2_internal_tensor_assign_10_begin_mask_0, end = concat_63, end_mask = v_cache2_internal_tensor_assign_10_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_10_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_10_stride_0, update = linear_19_cast_fp16, x = coreml_update_state_69)[name = string("v_cache2_internal_tensor_assign_10_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_10_cast_fp16, input = v_cache2)[name = string("coreml_update_state_71_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_71 = read_state(input = v_cache2)[name = string("coreml_update_state_71")]; |
|
tensor<fp16, [1024, 1024]> var_455_to_fp16 = const()[name = string("op_455_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(63987776)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_20_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_455_to_fp16, x = audio_data)[name = string("linear_20_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_459_to_fp16 = const()[name = string("op_459_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(66084992)))]; |
|
tensor<fp16, [1024]> var_460_to_fp16 = const()[name = string("op_460_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(68182208)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_21_cast_fp16 = linear(bias = var_460_to_fp16, weight = var_459_to_fp16, x = audio_data)[name = string("linear_21_cast_fp16")]; |
|
tensor<int32, [3]> var_462_shape_cast_fp16 = shape(x = linear_20_cast_fp16)[name = string("op_462_shape_cast_fp16")]; |
|
int32 gather_20_axis_0 = const()[name = string("gather_20_axis_0"), val = int32(0)]; |
|
int32 gather_20_batch_dims_0 = const()[name = string("gather_20_batch_dims_0"), val = int32(0)]; |
|
bool gather_20_validate_indices_0 = const()[name = string("gather_20_validate_indices_0"), val = bool(false)]; |
|
string var_462_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_462_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_20_to_uint16 = const()[name = string("select_20_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_462_shape_cast_fp16_to_uint16 = cast(dtype = var_462_shape_cast_fp16_to_uint16_dtype_0, x = var_462_shape_cast_fp16)[name = string("cast_111")]; |
|
uint16 gather_20_cast_uint16 = gather(axis = gather_20_axis_0, batch_dims = gather_20_batch_dims_0, indices = select_20_to_uint16, validate_indices = gather_20_validate_indices_0, x = var_462_shape_cast_fp16_to_uint16)[name = string("gather_20_cast_uint16")]; |
|
string gather_20_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_20_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_91_axes_0 = const()[name = string("expand_dims_91_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_20_cast_uint16_to_int32 = cast(dtype = gather_20_cast_uint16_to_int32_dtype_0, x = gather_20_cast_uint16)[name = string("cast_110")]; |
|
tensor<int32, [1]> expand_dims_91 = expand_dims(axes = expand_dims_91_axes_0, x = gather_20_cast_uint16_to_int32)[name = string("expand_dims_91")]; |
|
tensor<int32, [4]> concat_65 = const()[name = string("concat_65"), val = tensor<int32, [4]>([10, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_66_values0_0 = const()[name = string("concat_66_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_66_values1_0 = const()[name = string("concat_66_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_66_values3_0 = const()[name = string("concat_66_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_66_axis_0 = const()[name = string("concat_66_axis_0"), val = int32(0)]; |
|
bool concat_66_interleave_0 = const()[name = string("concat_66_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_66 = concat(axis = concat_66_axis_0, interleave = concat_66_interleave_0, values = (concat_66_values0_0, concat_66_values1_0, expand_dims_91, concat_66_values3_0))[name = string("concat_66")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_11_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_11_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_11_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_11_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_11_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_11_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_11_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_11_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_11_cast_fp16 = slice_update(begin = concat_65, begin_mask = k_cache2_internal_tensor_assign_11_begin_mask_0, end = concat_66, end_mask = k_cache2_internal_tensor_assign_11_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_11_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_11_stride_0, update = linear_20_cast_fp16, x = coreml_update_state_70)[name = string("k_cache2_internal_tensor_assign_11_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_11_cast_fp16, input = k_cache2)[name = string("coreml_update_state_72_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_72 = read_state(input = k_cache2)[name = string("coreml_update_state_72")]; |
|
tensor<int32, [3]> var_467_shape_cast_fp16 = shape(x = linear_21_cast_fp16)[name = string("op_467_shape_cast_fp16")]; |
|
int32 gather_21_axis_0 = const()[name = string("gather_21_axis_0"), val = int32(0)]; |
|
int32 gather_21_batch_dims_0 = const()[name = string("gather_21_batch_dims_0"), val = int32(0)]; |
|
bool gather_21_validate_indices_0 = const()[name = string("gather_21_validate_indices_0"), val = bool(false)]; |
|
string var_467_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_467_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_21_to_uint16 = const()[name = string("select_21_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_467_shape_cast_fp16_to_uint16 = cast(dtype = var_467_shape_cast_fp16_to_uint16_dtype_0, x = var_467_shape_cast_fp16)[name = string("cast_109")]; |
|
uint16 gather_21_cast_uint16 = gather(axis = gather_21_axis_0, batch_dims = gather_21_batch_dims_0, indices = select_21_to_uint16, validate_indices = gather_21_validate_indices_0, x = var_467_shape_cast_fp16_to_uint16)[name = string("gather_21_cast_uint16")]; |
|
string gather_21_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_21_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_95_axes_0 = const()[name = string("expand_dims_95_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_21_cast_uint16_to_int32 = cast(dtype = gather_21_cast_uint16_to_int32_dtype_0, x = gather_21_cast_uint16)[name = string("cast_108")]; |
|
tensor<int32, [1]> expand_dims_95 = expand_dims(axes = expand_dims_95_axes_0, x = gather_21_cast_uint16_to_int32)[name = string("expand_dims_95")]; |
|
tensor<int32, [4]> concat_68 = const()[name = string("concat_68"), val = tensor<int32, [4]>([10, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_69_values0_0 = const()[name = string("concat_69_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_69_values1_0 = const()[name = string("concat_69_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_69_values3_0 = const()[name = string("concat_69_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_69_axis_0 = const()[name = string("concat_69_axis_0"), val = int32(0)]; |
|
bool concat_69_interleave_0 = const()[name = string("concat_69_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_69 = concat(axis = concat_69_axis_0, interleave = concat_69_interleave_0, values = (concat_69_values0_0, concat_69_values1_0, expand_dims_95, concat_69_values3_0))[name = string("concat_69")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_11_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_11_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_11_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_11_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_11_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_11_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_11_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_11_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_11_cast_fp16 = slice_update(begin = concat_68, begin_mask = v_cache2_internal_tensor_assign_11_begin_mask_0, end = concat_69, end_mask = v_cache2_internal_tensor_assign_11_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_11_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_11_stride_0, update = linear_21_cast_fp16, x = coreml_update_state_71)[name = string("v_cache2_internal_tensor_assign_11_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_11_cast_fp16, input = v_cache2)[name = string("coreml_update_state_73_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_73 = read_state(input = v_cache2)[name = string("coreml_update_state_73")]; |
|
tensor<fp16, [1024, 1024]> var_489_to_fp16 = const()[name = string("op_489_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(68184320)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_22_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_489_to_fp16, x = audio_data)[name = string("linear_22_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_493_to_fp16 = const()[name = string("op_493_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(70281536)))]; |
|
tensor<fp16, [1024]> var_494_to_fp16 = const()[name = string("op_494_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(72378752)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_23_cast_fp16 = linear(bias = var_494_to_fp16, weight = var_493_to_fp16, x = audio_data)[name = string("linear_23_cast_fp16")]; |
|
tensor<int32, [3]> var_496_shape_cast_fp16 = shape(x = linear_22_cast_fp16)[name = string("op_496_shape_cast_fp16")]; |
|
int32 gather_22_axis_0 = const()[name = string("gather_22_axis_0"), val = int32(0)]; |
|
int32 gather_22_batch_dims_0 = const()[name = string("gather_22_batch_dims_0"), val = int32(0)]; |
|
bool gather_22_validate_indices_0 = const()[name = string("gather_22_validate_indices_0"), val = bool(false)]; |
|
string var_496_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_496_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_22_to_uint16 = const()[name = string("select_22_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_496_shape_cast_fp16_to_uint16 = cast(dtype = var_496_shape_cast_fp16_to_uint16_dtype_0, x = var_496_shape_cast_fp16)[name = string("cast_107")]; |
|
uint16 gather_22_cast_uint16 = gather(axis = gather_22_axis_0, batch_dims = gather_22_batch_dims_0, indices = select_22_to_uint16, validate_indices = gather_22_validate_indices_0, x = var_496_shape_cast_fp16_to_uint16)[name = string("gather_22_cast_uint16")]; |
|
string gather_22_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_22_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_99_axes_0 = const()[name = string("expand_dims_99_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_22_cast_uint16_to_int32 = cast(dtype = gather_22_cast_uint16_to_int32_dtype_0, x = gather_22_cast_uint16)[name = string("cast_106")]; |
|
tensor<int32, [1]> expand_dims_99 = expand_dims(axes = expand_dims_99_axes_0, x = gather_22_cast_uint16_to_int32)[name = string("expand_dims_99")]; |
|
tensor<int32, [4]> concat_71 = const()[name = string("concat_71"), val = tensor<int32, [4]>([11, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_72_values0_0 = const()[name = string("concat_72_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_72_values1_0 = const()[name = string("concat_72_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_72_values3_0 = const()[name = string("concat_72_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_72_axis_0 = const()[name = string("concat_72_axis_0"), val = int32(0)]; |
|
bool concat_72_interleave_0 = const()[name = string("concat_72_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_72 = concat(axis = concat_72_axis_0, interleave = concat_72_interleave_0, values = (concat_72_values0_0, concat_72_values1_0, expand_dims_99, concat_72_values3_0))[name = string("concat_72")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_12_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_12_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_12_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_12_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_12_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_12_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_12_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_12_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_12_cast_fp16 = slice_update(begin = concat_71, begin_mask = k_cache2_internal_tensor_assign_12_begin_mask_0, end = concat_72, end_mask = k_cache2_internal_tensor_assign_12_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_12_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_12_stride_0, update = linear_22_cast_fp16, x = coreml_update_state_72)[name = string("k_cache2_internal_tensor_assign_12_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_12_cast_fp16, input = k_cache2)[name = string("coreml_update_state_74_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_74 = read_state(input = k_cache2)[name = string("coreml_update_state_74")]; |
|
tensor<int32, [3]> var_501_shape_cast_fp16 = shape(x = linear_23_cast_fp16)[name = string("op_501_shape_cast_fp16")]; |
|
int32 gather_23_axis_0 = const()[name = string("gather_23_axis_0"), val = int32(0)]; |
|
int32 gather_23_batch_dims_0 = const()[name = string("gather_23_batch_dims_0"), val = int32(0)]; |
|
bool gather_23_validate_indices_0 = const()[name = string("gather_23_validate_indices_0"), val = bool(false)]; |
|
string var_501_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_501_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_23_to_uint16 = const()[name = string("select_23_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_501_shape_cast_fp16_to_uint16 = cast(dtype = var_501_shape_cast_fp16_to_uint16_dtype_0, x = var_501_shape_cast_fp16)[name = string("cast_105")]; |
|
uint16 gather_23_cast_uint16 = gather(axis = gather_23_axis_0, batch_dims = gather_23_batch_dims_0, indices = select_23_to_uint16, validate_indices = gather_23_validate_indices_0, x = var_501_shape_cast_fp16_to_uint16)[name = string("gather_23_cast_uint16")]; |
|
string gather_23_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_23_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_103_axes_0 = const()[name = string("expand_dims_103_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_23_cast_uint16_to_int32 = cast(dtype = gather_23_cast_uint16_to_int32_dtype_0, x = gather_23_cast_uint16)[name = string("cast_104")]; |
|
tensor<int32, [1]> expand_dims_103 = expand_dims(axes = expand_dims_103_axes_0, x = gather_23_cast_uint16_to_int32)[name = string("expand_dims_103")]; |
|
tensor<int32, [4]> concat_74 = const()[name = string("concat_74"), val = tensor<int32, [4]>([11, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_75_values0_0 = const()[name = string("concat_75_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_75_values1_0 = const()[name = string("concat_75_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_75_values3_0 = const()[name = string("concat_75_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_75_axis_0 = const()[name = string("concat_75_axis_0"), val = int32(0)]; |
|
bool concat_75_interleave_0 = const()[name = string("concat_75_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_75 = concat(axis = concat_75_axis_0, interleave = concat_75_interleave_0, values = (concat_75_values0_0, concat_75_values1_0, expand_dims_103, concat_75_values3_0))[name = string("concat_75")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_12_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_12_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_12_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_12_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_12_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_12_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_12_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_12_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_12_cast_fp16 = slice_update(begin = concat_74, begin_mask = v_cache2_internal_tensor_assign_12_begin_mask_0, end = concat_75, end_mask = v_cache2_internal_tensor_assign_12_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_12_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_12_stride_0, update = linear_23_cast_fp16, x = coreml_update_state_73)[name = string("v_cache2_internal_tensor_assign_12_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_12_cast_fp16, input = v_cache2)[name = string("coreml_update_state_75_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_75 = read_state(input = v_cache2)[name = string("coreml_update_state_75")]; |
|
tensor<fp16, [1024, 1024]> var_523_to_fp16 = const()[name = string("op_523_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(72380864)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_24_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_523_to_fp16, x = audio_data)[name = string("linear_24_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_527_to_fp16 = const()[name = string("op_527_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(74478080)))]; |
|
tensor<fp16, [1024]> var_528_to_fp16 = const()[name = string("op_528_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(76575296)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_25_cast_fp16 = linear(bias = var_528_to_fp16, weight = var_527_to_fp16, x = audio_data)[name = string("linear_25_cast_fp16")]; |
|
tensor<int32, [3]> var_530_shape_cast_fp16 = shape(x = linear_24_cast_fp16)[name = string("op_530_shape_cast_fp16")]; |
|
int32 gather_24_axis_0 = const()[name = string("gather_24_axis_0"), val = int32(0)]; |
|
int32 gather_24_batch_dims_0 = const()[name = string("gather_24_batch_dims_0"), val = int32(0)]; |
|
bool gather_24_validate_indices_0 = const()[name = string("gather_24_validate_indices_0"), val = bool(false)]; |
|
string var_530_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_530_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_24_to_uint16 = const()[name = string("select_24_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_530_shape_cast_fp16_to_uint16 = cast(dtype = var_530_shape_cast_fp16_to_uint16_dtype_0, x = var_530_shape_cast_fp16)[name = string("cast_103")]; |
|
uint16 gather_24_cast_uint16 = gather(axis = gather_24_axis_0, batch_dims = gather_24_batch_dims_0, indices = select_24_to_uint16, validate_indices = gather_24_validate_indices_0, x = var_530_shape_cast_fp16_to_uint16)[name = string("gather_24_cast_uint16")]; |
|
string gather_24_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_24_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_107_axes_0 = const()[name = string("expand_dims_107_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_24_cast_uint16_to_int32 = cast(dtype = gather_24_cast_uint16_to_int32_dtype_0, x = gather_24_cast_uint16)[name = string("cast_102")]; |
|
tensor<int32, [1]> expand_dims_107 = expand_dims(axes = expand_dims_107_axes_0, x = gather_24_cast_uint16_to_int32)[name = string("expand_dims_107")]; |
|
tensor<int32, [4]> concat_77 = const()[name = string("concat_77"), val = tensor<int32, [4]>([12, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_78_values0_0 = const()[name = string("concat_78_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_78_values1_0 = const()[name = string("concat_78_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_78_values3_0 = const()[name = string("concat_78_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_78_axis_0 = const()[name = string("concat_78_axis_0"), val = int32(0)]; |
|
bool concat_78_interleave_0 = const()[name = string("concat_78_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_78 = concat(axis = concat_78_axis_0, interleave = concat_78_interleave_0, values = (concat_78_values0_0, concat_78_values1_0, expand_dims_107, concat_78_values3_0))[name = string("concat_78")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_13_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_13_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_13_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_13_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_13_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_13_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_13_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_13_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_13_cast_fp16 = slice_update(begin = concat_77, begin_mask = k_cache2_internal_tensor_assign_13_begin_mask_0, end = concat_78, end_mask = k_cache2_internal_tensor_assign_13_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_13_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_13_stride_0, update = linear_24_cast_fp16, x = coreml_update_state_74)[name = string("k_cache2_internal_tensor_assign_13_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_13_cast_fp16, input = k_cache2)[name = string("coreml_update_state_76_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_76 = read_state(input = k_cache2)[name = string("coreml_update_state_76")]; |
|
tensor<int32, [3]> var_535_shape_cast_fp16 = shape(x = linear_25_cast_fp16)[name = string("op_535_shape_cast_fp16")]; |
|
int32 gather_25_axis_0 = const()[name = string("gather_25_axis_0"), val = int32(0)]; |
|
int32 gather_25_batch_dims_0 = const()[name = string("gather_25_batch_dims_0"), val = int32(0)]; |
|
bool gather_25_validate_indices_0 = const()[name = string("gather_25_validate_indices_0"), val = bool(false)]; |
|
string var_535_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_535_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_25_to_uint16 = const()[name = string("select_25_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_535_shape_cast_fp16_to_uint16 = cast(dtype = var_535_shape_cast_fp16_to_uint16_dtype_0, x = var_535_shape_cast_fp16)[name = string("cast_101")]; |
|
uint16 gather_25_cast_uint16 = gather(axis = gather_25_axis_0, batch_dims = gather_25_batch_dims_0, indices = select_25_to_uint16, validate_indices = gather_25_validate_indices_0, x = var_535_shape_cast_fp16_to_uint16)[name = string("gather_25_cast_uint16")]; |
|
string gather_25_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_25_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_111_axes_0 = const()[name = string("expand_dims_111_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_25_cast_uint16_to_int32 = cast(dtype = gather_25_cast_uint16_to_int32_dtype_0, x = gather_25_cast_uint16)[name = string("cast_100")]; |
|
tensor<int32, [1]> expand_dims_111 = expand_dims(axes = expand_dims_111_axes_0, x = gather_25_cast_uint16_to_int32)[name = string("expand_dims_111")]; |
|
tensor<int32, [4]> concat_80 = const()[name = string("concat_80"), val = tensor<int32, [4]>([12, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_81_values0_0 = const()[name = string("concat_81_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_81_values1_0 = const()[name = string("concat_81_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_81_values3_0 = const()[name = string("concat_81_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_81_axis_0 = const()[name = string("concat_81_axis_0"), val = int32(0)]; |
|
bool concat_81_interleave_0 = const()[name = string("concat_81_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_81 = concat(axis = concat_81_axis_0, interleave = concat_81_interleave_0, values = (concat_81_values0_0, concat_81_values1_0, expand_dims_111, concat_81_values3_0))[name = string("concat_81")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_13_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_13_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_13_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_13_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_13_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_13_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_13_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_13_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_13_cast_fp16 = slice_update(begin = concat_80, begin_mask = v_cache2_internal_tensor_assign_13_begin_mask_0, end = concat_81, end_mask = v_cache2_internal_tensor_assign_13_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_13_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_13_stride_0, update = linear_25_cast_fp16, x = coreml_update_state_75)[name = string("v_cache2_internal_tensor_assign_13_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_13_cast_fp16, input = v_cache2)[name = string("coreml_update_state_77_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_77 = read_state(input = v_cache2)[name = string("coreml_update_state_77")]; |
|
tensor<fp16, [1024, 1024]> var_557_to_fp16 = const()[name = string("op_557_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(76577408)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_26_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_557_to_fp16, x = audio_data)[name = string("linear_26_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_561_to_fp16 = const()[name = string("op_561_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78674624)))]; |
|
tensor<fp16, [1024]> var_562_to_fp16 = const()[name = string("op_562_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(80771840)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_27_cast_fp16 = linear(bias = var_562_to_fp16, weight = var_561_to_fp16, x = audio_data)[name = string("linear_27_cast_fp16")]; |
|
tensor<int32, [3]> var_564_shape_cast_fp16 = shape(x = linear_26_cast_fp16)[name = string("op_564_shape_cast_fp16")]; |
|
int32 gather_26_axis_0 = const()[name = string("gather_26_axis_0"), val = int32(0)]; |
|
int32 gather_26_batch_dims_0 = const()[name = string("gather_26_batch_dims_0"), val = int32(0)]; |
|
bool gather_26_validate_indices_0 = const()[name = string("gather_26_validate_indices_0"), val = bool(false)]; |
|
string var_564_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_564_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_26_to_uint16 = const()[name = string("select_26_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_564_shape_cast_fp16_to_uint16 = cast(dtype = var_564_shape_cast_fp16_to_uint16_dtype_0, x = var_564_shape_cast_fp16)[name = string("cast_99")]; |
|
uint16 gather_26_cast_uint16 = gather(axis = gather_26_axis_0, batch_dims = gather_26_batch_dims_0, indices = select_26_to_uint16, validate_indices = gather_26_validate_indices_0, x = var_564_shape_cast_fp16_to_uint16)[name = string("gather_26_cast_uint16")]; |
|
string gather_26_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_26_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_115_axes_0 = const()[name = string("expand_dims_115_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_26_cast_uint16_to_int32 = cast(dtype = gather_26_cast_uint16_to_int32_dtype_0, x = gather_26_cast_uint16)[name = string("cast_98")]; |
|
tensor<int32, [1]> expand_dims_115 = expand_dims(axes = expand_dims_115_axes_0, x = gather_26_cast_uint16_to_int32)[name = string("expand_dims_115")]; |
|
tensor<int32, [4]> concat_83 = const()[name = string("concat_83"), val = tensor<int32, [4]>([13, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_84_values0_0 = const()[name = string("concat_84_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_84_values1_0 = const()[name = string("concat_84_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_84_values3_0 = const()[name = string("concat_84_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_84_axis_0 = const()[name = string("concat_84_axis_0"), val = int32(0)]; |
|
bool concat_84_interleave_0 = const()[name = string("concat_84_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_84 = concat(axis = concat_84_axis_0, interleave = concat_84_interleave_0, values = (concat_84_values0_0, concat_84_values1_0, expand_dims_115, concat_84_values3_0))[name = string("concat_84")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_14_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_14_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_14_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_14_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_14_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_14_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_14_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_14_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_14_cast_fp16 = slice_update(begin = concat_83, begin_mask = k_cache2_internal_tensor_assign_14_begin_mask_0, end = concat_84, end_mask = k_cache2_internal_tensor_assign_14_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_14_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_14_stride_0, update = linear_26_cast_fp16, x = coreml_update_state_76)[name = string("k_cache2_internal_tensor_assign_14_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_14_cast_fp16, input = k_cache2)[name = string("coreml_update_state_78_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_78 = read_state(input = k_cache2)[name = string("coreml_update_state_78")]; |
|
tensor<int32, [3]> var_569_shape_cast_fp16 = shape(x = linear_27_cast_fp16)[name = string("op_569_shape_cast_fp16")]; |
|
int32 gather_27_axis_0 = const()[name = string("gather_27_axis_0"), val = int32(0)]; |
|
int32 gather_27_batch_dims_0 = const()[name = string("gather_27_batch_dims_0"), val = int32(0)]; |
|
bool gather_27_validate_indices_0 = const()[name = string("gather_27_validate_indices_0"), val = bool(false)]; |
|
string var_569_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_569_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_27_to_uint16 = const()[name = string("select_27_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_569_shape_cast_fp16_to_uint16 = cast(dtype = var_569_shape_cast_fp16_to_uint16_dtype_0, x = var_569_shape_cast_fp16)[name = string("cast_97")]; |
|
uint16 gather_27_cast_uint16 = gather(axis = gather_27_axis_0, batch_dims = gather_27_batch_dims_0, indices = select_27_to_uint16, validate_indices = gather_27_validate_indices_0, x = var_569_shape_cast_fp16_to_uint16)[name = string("gather_27_cast_uint16")]; |
|
string gather_27_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_27_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_119_axes_0 = const()[name = string("expand_dims_119_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_27_cast_uint16_to_int32 = cast(dtype = gather_27_cast_uint16_to_int32_dtype_0, x = gather_27_cast_uint16)[name = string("cast_96")]; |
|
tensor<int32, [1]> expand_dims_119 = expand_dims(axes = expand_dims_119_axes_0, x = gather_27_cast_uint16_to_int32)[name = string("expand_dims_119")]; |
|
tensor<int32, [4]> concat_86 = const()[name = string("concat_86"), val = tensor<int32, [4]>([13, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_87_values0_0 = const()[name = string("concat_87_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_87_values1_0 = const()[name = string("concat_87_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_87_values3_0 = const()[name = string("concat_87_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_87_axis_0 = const()[name = string("concat_87_axis_0"), val = int32(0)]; |
|
bool concat_87_interleave_0 = const()[name = string("concat_87_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_87 = concat(axis = concat_87_axis_0, interleave = concat_87_interleave_0, values = (concat_87_values0_0, concat_87_values1_0, expand_dims_119, concat_87_values3_0))[name = string("concat_87")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_14_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_14_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_14_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_14_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_14_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_14_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_14_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_14_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_14_cast_fp16 = slice_update(begin = concat_86, begin_mask = v_cache2_internal_tensor_assign_14_begin_mask_0, end = concat_87, end_mask = v_cache2_internal_tensor_assign_14_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_14_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_14_stride_0, update = linear_27_cast_fp16, x = coreml_update_state_77)[name = string("v_cache2_internal_tensor_assign_14_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_14_cast_fp16, input = v_cache2)[name = string("coreml_update_state_79_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_79 = read_state(input = v_cache2)[name = string("coreml_update_state_79")]; |
|
tensor<fp16, [1024, 1024]> var_591_to_fp16 = const()[name = string("op_591_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(80773952)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_28_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_591_to_fp16, x = audio_data)[name = string("linear_28_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_595_to_fp16 = const()[name = string("op_595_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(82871168)))]; |
|
tensor<fp16, [1024]> var_596_to_fp16 = const()[name = string("op_596_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(84968384)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_29_cast_fp16 = linear(bias = var_596_to_fp16, weight = var_595_to_fp16, x = audio_data)[name = string("linear_29_cast_fp16")]; |
|
tensor<int32, [3]> var_598_shape_cast_fp16 = shape(x = linear_28_cast_fp16)[name = string("op_598_shape_cast_fp16")]; |
|
int32 gather_28_axis_0 = const()[name = string("gather_28_axis_0"), val = int32(0)]; |
|
int32 gather_28_batch_dims_0 = const()[name = string("gather_28_batch_dims_0"), val = int32(0)]; |
|
bool gather_28_validate_indices_0 = const()[name = string("gather_28_validate_indices_0"), val = bool(false)]; |
|
string var_598_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_598_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_28_to_uint16 = const()[name = string("select_28_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_598_shape_cast_fp16_to_uint16 = cast(dtype = var_598_shape_cast_fp16_to_uint16_dtype_0, x = var_598_shape_cast_fp16)[name = string("cast_95")]; |
|
uint16 gather_28_cast_uint16 = gather(axis = gather_28_axis_0, batch_dims = gather_28_batch_dims_0, indices = select_28_to_uint16, validate_indices = gather_28_validate_indices_0, x = var_598_shape_cast_fp16_to_uint16)[name = string("gather_28_cast_uint16")]; |
|
string gather_28_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_28_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_123_axes_0 = const()[name = string("expand_dims_123_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_28_cast_uint16_to_int32 = cast(dtype = gather_28_cast_uint16_to_int32_dtype_0, x = gather_28_cast_uint16)[name = string("cast_94")]; |
|
tensor<int32, [1]> expand_dims_123 = expand_dims(axes = expand_dims_123_axes_0, x = gather_28_cast_uint16_to_int32)[name = string("expand_dims_123")]; |
|
tensor<int32, [4]> concat_89 = const()[name = string("concat_89"), val = tensor<int32, [4]>([14, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_90_values0_0 = const()[name = string("concat_90_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_90_values1_0 = const()[name = string("concat_90_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_90_values3_0 = const()[name = string("concat_90_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_90_axis_0 = const()[name = string("concat_90_axis_0"), val = int32(0)]; |
|
bool concat_90_interleave_0 = const()[name = string("concat_90_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_90 = concat(axis = concat_90_axis_0, interleave = concat_90_interleave_0, values = (concat_90_values0_0, concat_90_values1_0, expand_dims_123, concat_90_values3_0))[name = string("concat_90")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_15_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_15_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_15_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_15_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_15_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_15_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_15_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_15_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_15_cast_fp16 = slice_update(begin = concat_89, begin_mask = k_cache2_internal_tensor_assign_15_begin_mask_0, end = concat_90, end_mask = k_cache2_internal_tensor_assign_15_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_15_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_15_stride_0, update = linear_28_cast_fp16, x = coreml_update_state_78)[name = string("k_cache2_internal_tensor_assign_15_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_15_cast_fp16, input = k_cache2)[name = string("coreml_update_state_80_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_80 = read_state(input = k_cache2)[name = string("coreml_update_state_80")]; |
|
tensor<int32, [3]> var_603_shape_cast_fp16 = shape(x = linear_29_cast_fp16)[name = string("op_603_shape_cast_fp16")]; |
|
int32 gather_29_axis_0 = const()[name = string("gather_29_axis_0"), val = int32(0)]; |
|
int32 gather_29_batch_dims_0 = const()[name = string("gather_29_batch_dims_0"), val = int32(0)]; |
|
bool gather_29_validate_indices_0 = const()[name = string("gather_29_validate_indices_0"), val = bool(false)]; |
|
string var_603_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_603_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_29_to_uint16 = const()[name = string("select_29_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_603_shape_cast_fp16_to_uint16 = cast(dtype = var_603_shape_cast_fp16_to_uint16_dtype_0, x = var_603_shape_cast_fp16)[name = string("cast_93")]; |
|
uint16 gather_29_cast_uint16 = gather(axis = gather_29_axis_0, batch_dims = gather_29_batch_dims_0, indices = select_29_to_uint16, validate_indices = gather_29_validate_indices_0, x = var_603_shape_cast_fp16_to_uint16)[name = string("gather_29_cast_uint16")]; |
|
string gather_29_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_29_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_127_axes_0 = const()[name = string("expand_dims_127_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_29_cast_uint16_to_int32 = cast(dtype = gather_29_cast_uint16_to_int32_dtype_0, x = gather_29_cast_uint16)[name = string("cast_92")]; |
|
tensor<int32, [1]> expand_dims_127 = expand_dims(axes = expand_dims_127_axes_0, x = gather_29_cast_uint16_to_int32)[name = string("expand_dims_127")]; |
|
tensor<int32, [4]> concat_92 = const()[name = string("concat_92"), val = tensor<int32, [4]>([14, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_93_values0_0 = const()[name = string("concat_93_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_93_values1_0 = const()[name = string("concat_93_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_93_values3_0 = const()[name = string("concat_93_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_93_axis_0 = const()[name = string("concat_93_axis_0"), val = int32(0)]; |
|
bool concat_93_interleave_0 = const()[name = string("concat_93_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_93 = concat(axis = concat_93_axis_0, interleave = concat_93_interleave_0, values = (concat_93_values0_0, concat_93_values1_0, expand_dims_127, concat_93_values3_0))[name = string("concat_93")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_15_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_15_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_15_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_15_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_15_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_15_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_15_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_15_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_15_cast_fp16 = slice_update(begin = concat_92, begin_mask = v_cache2_internal_tensor_assign_15_begin_mask_0, end = concat_93, end_mask = v_cache2_internal_tensor_assign_15_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_15_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_15_stride_0, update = linear_29_cast_fp16, x = coreml_update_state_79)[name = string("v_cache2_internal_tensor_assign_15_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_15_cast_fp16, input = v_cache2)[name = string("coreml_update_state_81_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_81 = read_state(input = v_cache2)[name = string("coreml_update_state_81")]; |
|
tensor<fp16, [1024, 1024]> var_625_to_fp16 = const()[name = string("op_625_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(84970496)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_30_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_625_to_fp16, x = audio_data)[name = string("linear_30_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_629_to_fp16 = const()[name = string("op_629_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(87067712)))]; |
|
tensor<fp16, [1024]> var_630_to_fp16 = const()[name = string("op_630_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(89164928)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_31_cast_fp16 = linear(bias = var_630_to_fp16, weight = var_629_to_fp16, x = audio_data)[name = string("linear_31_cast_fp16")]; |
|
tensor<int32, [3]> var_632_shape_cast_fp16 = shape(x = linear_30_cast_fp16)[name = string("op_632_shape_cast_fp16")]; |
|
int32 gather_30_axis_0 = const()[name = string("gather_30_axis_0"), val = int32(0)]; |
|
int32 gather_30_batch_dims_0 = const()[name = string("gather_30_batch_dims_0"), val = int32(0)]; |
|
bool gather_30_validate_indices_0 = const()[name = string("gather_30_validate_indices_0"), val = bool(false)]; |
|
string var_632_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_632_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_30_to_uint16 = const()[name = string("select_30_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_632_shape_cast_fp16_to_uint16 = cast(dtype = var_632_shape_cast_fp16_to_uint16_dtype_0, x = var_632_shape_cast_fp16)[name = string("cast_91")]; |
|
uint16 gather_30_cast_uint16 = gather(axis = gather_30_axis_0, batch_dims = gather_30_batch_dims_0, indices = select_30_to_uint16, validate_indices = gather_30_validate_indices_0, x = var_632_shape_cast_fp16_to_uint16)[name = string("gather_30_cast_uint16")]; |
|
string gather_30_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_30_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_131_axes_0 = const()[name = string("expand_dims_131_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_30_cast_uint16_to_int32 = cast(dtype = gather_30_cast_uint16_to_int32_dtype_0, x = gather_30_cast_uint16)[name = string("cast_90")]; |
|
tensor<int32, [1]> expand_dims_131 = expand_dims(axes = expand_dims_131_axes_0, x = gather_30_cast_uint16_to_int32)[name = string("expand_dims_131")]; |
|
tensor<int32, [4]> concat_95 = const()[name = string("concat_95"), val = tensor<int32, [4]>([15, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_96_values0_0 = const()[name = string("concat_96_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_96_values1_0 = const()[name = string("concat_96_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_96_values3_0 = const()[name = string("concat_96_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_96_axis_0 = const()[name = string("concat_96_axis_0"), val = int32(0)]; |
|
bool concat_96_interleave_0 = const()[name = string("concat_96_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_96 = concat(axis = concat_96_axis_0, interleave = concat_96_interleave_0, values = (concat_96_values0_0, concat_96_values1_0, expand_dims_131, concat_96_values3_0))[name = string("concat_96")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_16_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_16_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_16_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_16_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_16_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_16_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_16_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_16_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_16_cast_fp16 = slice_update(begin = concat_95, begin_mask = k_cache2_internal_tensor_assign_16_begin_mask_0, end = concat_96, end_mask = k_cache2_internal_tensor_assign_16_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_16_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_16_stride_0, update = linear_30_cast_fp16, x = coreml_update_state_80)[name = string("k_cache2_internal_tensor_assign_16_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_16_cast_fp16, input = k_cache2)[name = string("coreml_update_state_82_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_82 = read_state(input = k_cache2)[name = string("coreml_update_state_82")]; |
|
tensor<int32, [3]> var_637_shape_cast_fp16 = shape(x = linear_31_cast_fp16)[name = string("op_637_shape_cast_fp16")]; |
|
int32 gather_31_axis_0 = const()[name = string("gather_31_axis_0"), val = int32(0)]; |
|
int32 gather_31_batch_dims_0 = const()[name = string("gather_31_batch_dims_0"), val = int32(0)]; |
|
bool gather_31_validate_indices_0 = const()[name = string("gather_31_validate_indices_0"), val = bool(false)]; |
|
string var_637_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_637_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_31_to_uint16 = const()[name = string("select_31_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_637_shape_cast_fp16_to_uint16 = cast(dtype = var_637_shape_cast_fp16_to_uint16_dtype_0, x = var_637_shape_cast_fp16)[name = string("cast_89")]; |
|
uint16 gather_31_cast_uint16 = gather(axis = gather_31_axis_0, batch_dims = gather_31_batch_dims_0, indices = select_31_to_uint16, validate_indices = gather_31_validate_indices_0, x = var_637_shape_cast_fp16_to_uint16)[name = string("gather_31_cast_uint16")]; |
|
string gather_31_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_31_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_135_axes_0 = const()[name = string("expand_dims_135_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_31_cast_uint16_to_int32 = cast(dtype = gather_31_cast_uint16_to_int32_dtype_0, x = gather_31_cast_uint16)[name = string("cast_88")]; |
|
tensor<int32, [1]> expand_dims_135 = expand_dims(axes = expand_dims_135_axes_0, x = gather_31_cast_uint16_to_int32)[name = string("expand_dims_135")]; |
|
tensor<int32, [4]> concat_98 = const()[name = string("concat_98"), val = tensor<int32, [4]>([15, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_99_values0_0 = const()[name = string("concat_99_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_99_values1_0 = const()[name = string("concat_99_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_99_values3_0 = const()[name = string("concat_99_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_99_axis_0 = const()[name = string("concat_99_axis_0"), val = int32(0)]; |
|
bool concat_99_interleave_0 = const()[name = string("concat_99_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_99 = concat(axis = concat_99_axis_0, interleave = concat_99_interleave_0, values = (concat_99_values0_0, concat_99_values1_0, expand_dims_135, concat_99_values3_0))[name = string("concat_99")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_16_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_16_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_16_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_16_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_16_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_16_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_16_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_16_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_16_cast_fp16 = slice_update(begin = concat_98, begin_mask = v_cache2_internal_tensor_assign_16_begin_mask_0, end = concat_99, end_mask = v_cache2_internal_tensor_assign_16_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_16_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_16_stride_0, update = linear_31_cast_fp16, x = coreml_update_state_81)[name = string("v_cache2_internal_tensor_assign_16_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_16_cast_fp16, input = v_cache2)[name = string("coreml_update_state_83_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_83 = read_state(input = v_cache2)[name = string("coreml_update_state_83")]; |
|
tensor<fp16, [1024, 1024]> var_659_to_fp16 = const()[name = string("op_659_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(89167040)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_32_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_659_to_fp16, x = audio_data)[name = string("linear_32_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_663_to_fp16 = const()[name = string("op_663_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(91264256)))]; |
|
tensor<fp16, [1024]> var_664_to_fp16 = const()[name = string("op_664_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(93361472)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_33_cast_fp16 = linear(bias = var_664_to_fp16, weight = var_663_to_fp16, x = audio_data)[name = string("linear_33_cast_fp16")]; |
|
tensor<int32, [3]> var_666_shape_cast_fp16 = shape(x = linear_32_cast_fp16)[name = string("op_666_shape_cast_fp16")]; |
|
int32 gather_32_axis_0 = const()[name = string("gather_32_axis_0"), val = int32(0)]; |
|
int32 gather_32_batch_dims_0 = const()[name = string("gather_32_batch_dims_0"), val = int32(0)]; |
|
bool gather_32_validate_indices_0 = const()[name = string("gather_32_validate_indices_0"), val = bool(false)]; |
|
string var_666_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_666_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_32_to_uint16 = const()[name = string("select_32_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_666_shape_cast_fp16_to_uint16 = cast(dtype = var_666_shape_cast_fp16_to_uint16_dtype_0, x = var_666_shape_cast_fp16)[name = string("cast_87")]; |
|
uint16 gather_32_cast_uint16 = gather(axis = gather_32_axis_0, batch_dims = gather_32_batch_dims_0, indices = select_32_to_uint16, validate_indices = gather_32_validate_indices_0, x = var_666_shape_cast_fp16_to_uint16)[name = string("gather_32_cast_uint16")]; |
|
string gather_32_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_32_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_139_axes_0 = const()[name = string("expand_dims_139_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_32_cast_uint16_to_int32 = cast(dtype = gather_32_cast_uint16_to_int32_dtype_0, x = gather_32_cast_uint16)[name = string("cast_86")]; |
|
tensor<int32, [1]> expand_dims_139 = expand_dims(axes = expand_dims_139_axes_0, x = gather_32_cast_uint16_to_int32)[name = string("expand_dims_139")]; |
|
tensor<int32, [4]> concat_101 = const()[name = string("concat_101"), val = tensor<int32, [4]>([16, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_102_values0_0 = const()[name = string("concat_102_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_102_values1_0 = const()[name = string("concat_102_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_102_values3_0 = const()[name = string("concat_102_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_102_axis_0 = const()[name = string("concat_102_axis_0"), val = int32(0)]; |
|
bool concat_102_interleave_0 = const()[name = string("concat_102_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_102 = concat(axis = concat_102_axis_0, interleave = concat_102_interleave_0, values = (concat_102_values0_0, concat_102_values1_0, expand_dims_139, concat_102_values3_0))[name = string("concat_102")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_17_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_17_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_17_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_17_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_17_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_17_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_17_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_17_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_17_cast_fp16 = slice_update(begin = concat_101, begin_mask = k_cache2_internal_tensor_assign_17_begin_mask_0, end = concat_102, end_mask = k_cache2_internal_tensor_assign_17_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_17_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_17_stride_0, update = linear_32_cast_fp16, x = coreml_update_state_82)[name = string("k_cache2_internal_tensor_assign_17_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_17_cast_fp16, input = k_cache2)[name = string("coreml_update_state_84_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_84 = read_state(input = k_cache2)[name = string("coreml_update_state_84")]; |
|
tensor<int32, [3]> var_671_shape_cast_fp16 = shape(x = linear_33_cast_fp16)[name = string("op_671_shape_cast_fp16")]; |
|
int32 gather_33_axis_0 = const()[name = string("gather_33_axis_0"), val = int32(0)]; |
|
int32 gather_33_batch_dims_0 = const()[name = string("gather_33_batch_dims_0"), val = int32(0)]; |
|
bool gather_33_validate_indices_0 = const()[name = string("gather_33_validate_indices_0"), val = bool(false)]; |
|
string var_671_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_671_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_33_to_uint16 = const()[name = string("select_33_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_671_shape_cast_fp16_to_uint16 = cast(dtype = var_671_shape_cast_fp16_to_uint16_dtype_0, x = var_671_shape_cast_fp16)[name = string("cast_85")]; |
|
uint16 gather_33_cast_uint16 = gather(axis = gather_33_axis_0, batch_dims = gather_33_batch_dims_0, indices = select_33_to_uint16, validate_indices = gather_33_validate_indices_0, x = var_671_shape_cast_fp16_to_uint16)[name = string("gather_33_cast_uint16")]; |
|
string gather_33_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_33_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_143_axes_0 = const()[name = string("expand_dims_143_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_33_cast_uint16_to_int32 = cast(dtype = gather_33_cast_uint16_to_int32_dtype_0, x = gather_33_cast_uint16)[name = string("cast_84")]; |
|
tensor<int32, [1]> expand_dims_143 = expand_dims(axes = expand_dims_143_axes_0, x = gather_33_cast_uint16_to_int32)[name = string("expand_dims_143")]; |
|
tensor<int32, [4]> concat_104 = const()[name = string("concat_104"), val = tensor<int32, [4]>([16, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_105_values0_0 = const()[name = string("concat_105_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_105_values1_0 = const()[name = string("concat_105_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_105_values3_0 = const()[name = string("concat_105_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_105_axis_0 = const()[name = string("concat_105_axis_0"), val = int32(0)]; |
|
bool concat_105_interleave_0 = const()[name = string("concat_105_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_105 = concat(axis = concat_105_axis_0, interleave = concat_105_interleave_0, values = (concat_105_values0_0, concat_105_values1_0, expand_dims_143, concat_105_values3_0))[name = string("concat_105")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_17_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_17_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_17_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_17_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_17_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_17_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_17_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_17_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_17_cast_fp16 = slice_update(begin = concat_104, begin_mask = v_cache2_internal_tensor_assign_17_begin_mask_0, end = concat_105, end_mask = v_cache2_internal_tensor_assign_17_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_17_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_17_stride_0, update = linear_33_cast_fp16, x = coreml_update_state_83)[name = string("v_cache2_internal_tensor_assign_17_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_17_cast_fp16, input = v_cache2)[name = string("coreml_update_state_85_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_85 = read_state(input = v_cache2)[name = string("coreml_update_state_85")]; |
|
tensor<fp16, [1024, 1024]> var_693_to_fp16 = const()[name = string("op_693_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(93363584)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_34_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_693_to_fp16, x = audio_data)[name = string("linear_34_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_697_to_fp16 = const()[name = string("op_697_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(95460800)))]; |
|
tensor<fp16, [1024]> var_698_to_fp16 = const()[name = string("op_698_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(97558016)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_35_cast_fp16 = linear(bias = var_698_to_fp16, weight = var_697_to_fp16, x = audio_data)[name = string("linear_35_cast_fp16")]; |
|
tensor<int32, [3]> var_700_shape_cast_fp16 = shape(x = linear_34_cast_fp16)[name = string("op_700_shape_cast_fp16")]; |
|
int32 gather_34_axis_0 = const()[name = string("gather_34_axis_0"), val = int32(0)]; |
|
int32 gather_34_batch_dims_0 = const()[name = string("gather_34_batch_dims_0"), val = int32(0)]; |
|
bool gather_34_validate_indices_0 = const()[name = string("gather_34_validate_indices_0"), val = bool(false)]; |
|
string var_700_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_700_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_34_to_uint16 = const()[name = string("select_34_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_700_shape_cast_fp16_to_uint16 = cast(dtype = var_700_shape_cast_fp16_to_uint16_dtype_0, x = var_700_shape_cast_fp16)[name = string("cast_83")]; |
|
uint16 gather_34_cast_uint16 = gather(axis = gather_34_axis_0, batch_dims = gather_34_batch_dims_0, indices = select_34_to_uint16, validate_indices = gather_34_validate_indices_0, x = var_700_shape_cast_fp16_to_uint16)[name = string("gather_34_cast_uint16")]; |
|
string gather_34_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_34_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_147_axes_0 = const()[name = string("expand_dims_147_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_34_cast_uint16_to_int32 = cast(dtype = gather_34_cast_uint16_to_int32_dtype_0, x = gather_34_cast_uint16)[name = string("cast_82")]; |
|
tensor<int32, [1]> expand_dims_147 = expand_dims(axes = expand_dims_147_axes_0, x = gather_34_cast_uint16_to_int32)[name = string("expand_dims_147")]; |
|
tensor<int32, [4]> concat_107 = const()[name = string("concat_107"), val = tensor<int32, [4]>([17, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_108_values0_0 = const()[name = string("concat_108_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_108_values1_0 = const()[name = string("concat_108_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_108_values3_0 = const()[name = string("concat_108_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_108_axis_0 = const()[name = string("concat_108_axis_0"), val = int32(0)]; |
|
bool concat_108_interleave_0 = const()[name = string("concat_108_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_108 = concat(axis = concat_108_axis_0, interleave = concat_108_interleave_0, values = (concat_108_values0_0, concat_108_values1_0, expand_dims_147, concat_108_values3_0))[name = string("concat_108")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_18_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_18_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_18_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_18_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_18_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_18_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_18_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_18_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_18_cast_fp16 = slice_update(begin = concat_107, begin_mask = k_cache2_internal_tensor_assign_18_begin_mask_0, end = concat_108, end_mask = k_cache2_internal_tensor_assign_18_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_18_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_18_stride_0, update = linear_34_cast_fp16, x = coreml_update_state_84)[name = string("k_cache2_internal_tensor_assign_18_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_18_cast_fp16, input = k_cache2)[name = string("coreml_update_state_86_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_86 = read_state(input = k_cache2)[name = string("coreml_update_state_86")]; |
|
tensor<int32, [3]> var_705_shape_cast_fp16 = shape(x = linear_35_cast_fp16)[name = string("op_705_shape_cast_fp16")]; |
|
int32 gather_35_axis_0 = const()[name = string("gather_35_axis_0"), val = int32(0)]; |
|
int32 gather_35_batch_dims_0 = const()[name = string("gather_35_batch_dims_0"), val = int32(0)]; |
|
bool gather_35_validate_indices_0 = const()[name = string("gather_35_validate_indices_0"), val = bool(false)]; |
|
string var_705_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_705_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_35_to_uint16 = const()[name = string("select_35_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_705_shape_cast_fp16_to_uint16 = cast(dtype = var_705_shape_cast_fp16_to_uint16_dtype_0, x = var_705_shape_cast_fp16)[name = string("cast_81")]; |
|
uint16 gather_35_cast_uint16 = gather(axis = gather_35_axis_0, batch_dims = gather_35_batch_dims_0, indices = select_35_to_uint16, validate_indices = gather_35_validate_indices_0, x = var_705_shape_cast_fp16_to_uint16)[name = string("gather_35_cast_uint16")]; |
|
string gather_35_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_35_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_151_axes_0 = const()[name = string("expand_dims_151_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_35_cast_uint16_to_int32 = cast(dtype = gather_35_cast_uint16_to_int32_dtype_0, x = gather_35_cast_uint16)[name = string("cast_80")]; |
|
tensor<int32, [1]> expand_dims_151 = expand_dims(axes = expand_dims_151_axes_0, x = gather_35_cast_uint16_to_int32)[name = string("expand_dims_151")]; |
|
tensor<int32, [4]> concat_110 = const()[name = string("concat_110"), val = tensor<int32, [4]>([17, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_111_values0_0 = const()[name = string("concat_111_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_111_values1_0 = const()[name = string("concat_111_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_111_values3_0 = const()[name = string("concat_111_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_111_axis_0 = const()[name = string("concat_111_axis_0"), val = int32(0)]; |
|
bool concat_111_interleave_0 = const()[name = string("concat_111_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_111 = concat(axis = concat_111_axis_0, interleave = concat_111_interleave_0, values = (concat_111_values0_0, concat_111_values1_0, expand_dims_151, concat_111_values3_0))[name = string("concat_111")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_18_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_18_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_18_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_18_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_18_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_18_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_18_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_18_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_18_cast_fp16 = slice_update(begin = concat_110, begin_mask = v_cache2_internal_tensor_assign_18_begin_mask_0, end = concat_111, end_mask = v_cache2_internal_tensor_assign_18_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_18_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_18_stride_0, update = linear_35_cast_fp16, x = coreml_update_state_85)[name = string("v_cache2_internal_tensor_assign_18_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_18_cast_fp16, input = v_cache2)[name = string("coreml_update_state_87_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_87 = read_state(input = v_cache2)[name = string("coreml_update_state_87")]; |
|
tensor<fp16, [1024, 1024]> var_727_to_fp16 = const()[name = string("op_727_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(97560128)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_36_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_727_to_fp16, x = audio_data)[name = string("linear_36_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_731_to_fp16 = const()[name = string("op_731_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(99657344)))]; |
|
tensor<fp16, [1024]> var_732_to_fp16 = const()[name = string("op_732_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(101754560)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_37_cast_fp16 = linear(bias = var_732_to_fp16, weight = var_731_to_fp16, x = audio_data)[name = string("linear_37_cast_fp16")]; |
|
tensor<int32, [3]> var_734_shape_cast_fp16 = shape(x = linear_36_cast_fp16)[name = string("op_734_shape_cast_fp16")]; |
|
int32 gather_36_axis_0 = const()[name = string("gather_36_axis_0"), val = int32(0)]; |
|
int32 gather_36_batch_dims_0 = const()[name = string("gather_36_batch_dims_0"), val = int32(0)]; |
|
bool gather_36_validate_indices_0 = const()[name = string("gather_36_validate_indices_0"), val = bool(false)]; |
|
string var_734_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_734_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_36_to_uint16 = const()[name = string("select_36_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_734_shape_cast_fp16_to_uint16 = cast(dtype = var_734_shape_cast_fp16_to_uint16_dtype_0, x = var_734_shape_cast_fp16)[name = string("cast_79")]; |
|
uint16 gather_36_cast_uint16 = gather(axis = gather_36_axis_0, batch_dims = gather_36_batch_dims_0, indices = select_36_to_uint16, validate_indices = gather_36_validate_indices_0, x = var_734_shape_cast_fp16_to_uint16)[name = string("gather_36_cast_uint16")]; |
|
string gather_36_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_36_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_155_axes_0 = const()[name = string("expand_dims_155_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_36_cast_uint16_to_int32 = cast(dtype = gather_36_cast_uint16_to_int32_dtype_0, x = gather_36_cast_uint16)[name = string("cast_78")]; |
|
tensor<int32, [1]> expand_dims_155 = expand_dims(axes = expand_dims_155_axes_0, x = gather_36_cast_uint16_to_int32)[name = string("expand_dims_155")]; |
|
tensor<int32, [4]> concat_113 = const()[name = string("concat_113"), val = tensor<int32, [4]>([18, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_114_values0_0 = const()[name = string("concat_114_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_114_values1_0 = const()[name = string("concat_114_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_114_values3_0 = const()[name = string("concat_114_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_114_axis_0 = const()[name = string("concat_114_axis_0"), val = int32(0)]; |
|
bool concat_114_interleave_0 = const()[name = string("concat_114_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_114 = concat(axis = concat_114_axis_0, interleave = concat_114_interleave_0, values = (concat_114_values0_0, concat_114_values1_0, expand_dims_155, concat_114_values3_0))[name = string("concat_114")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_19_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_19_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_19_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_19_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_19_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_19_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_19_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_19_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_19_cast_fp16 = slice_update(begin = concat_113, begin_mask = k_cache2_internal_tensor_assign_19_begin_mask_0, end = concat_114, end_mask = k_cache2_internal_tensor_assign_19_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_19_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_19_stride_0, update = linear_36_cast_fp16, x = coreml_update_state_86)[name = string("k_cache2_internal_tensor_assign_19_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_19_cast_fp16, input = k_cache2)[name = string("coreml_update_state_88_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_88 = read_state(input = k_cache2)[name = string("coreml_update_state_88")]; |
|
tensor<int32, [3]> var_739_shape_cast_fp16 = shape(x = linear_37_cast_fp16)[name = string("op_739_shape_cast_fp16")]; |
|
int32 gather_37_axis_0 = const()[name = string("gather_37_axis_0"), val = int32(0)]; |
|
int32 gather_37_batch_dims_0 = const()[name = string("gather_37_batch_dims_0"), val = int32(0)]; |
|
bool gather_37_validate_indices_0 = const()[name = string("gather_37_validate_indices_0"), val = bool(false)]; |
|
string var_739_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_739_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_37_to_uint16 = const()[name = string("select_37_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_739_shape_cast_fp16_to_uint16 = cast(dtype = var_739_shape_cast_fp16_to_uint16_dtype_0, x = var_739_shape_cast_fp16)[name = string("cast_77")]; |
|
uint16 gather_37_cast_uint16 = gather(axis = gather_37_axis_0, batch_dims = gather_37_batch_dims_0, indices = select_37_to_uint16, validate_indices = gather_37_validate_indices_0, x = var_739_shape_cast_fp16_to_uint16)[name = string("gather_37_cast_uint16")]; |
|
string gather_37_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_37_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_159_axes_0 = const()[name = string("expand_dims_159_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_37_cast_uint16_to_int32 = cast(dtype = gather_37_cast_uint16_to_int32_dtype_0, x = gather_37_cast_uint16)[name = string("cast_76")]; |
|
tensor<int32, [1]> expand_dims_159 = expand_dims(axes = expand_dims_159_axes_0, x = gather_37_cast_uint16_to_int32)[name = string("expand_dims_159")]; |
|
tensor<int32, [4]> concat_116 = const()[name = string("concat_116"), val = tensor<int32, [4]>([18, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_117_values0_0 = const()[name = string("concat_117_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_117_values1_0 = const()[name = string("concat_117_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_117_values3_0 = const()[name = string("concat_117_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_117_axis_0 = const()[name = string("concat_117_axis_0"), val = int32(0)]; |
|
bool concat_117_interleave_0 = const()[name = string("concat_117_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_117 = concat(axis = concat_117_axis_0, interleave = concat_117_interleave_0, values = (concat_117_values0_0, concat_117_values1_0, expand_dims_159, concat_117_values3_0))[name = string("concat_117")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_19_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_19_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_19_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_19_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_19_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_19_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_19_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_19_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_19_cast_fp16 = slice_update(begin = concat_116, begin_mask = v_cache2_internal_tensor_assign_19_begin_mask_0, end = concat_117, end_mask = v_cache2_internal_tensor_assign_19_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_19_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_19_stride_0, update = linear_37_cast_fp16, x = coreml_update_state_87)[name = string("v_cache2_internal_tensor_assign_19_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_19_cast_fp16, input = v_cache2)[name = string("coreml_update_state_89_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_89 = read_state(input = v_cache2)[name = string("coreml_update_state_89")]; |
|
tensor<fp16, [1024, 1024]> var_761_to_fp16 = const()[name = string("op_761_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(101756672)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_38_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_761_to_fp16, x = audio_data)[name = string("linear_38_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_765_to_fp16 = const()[name = string("op_765_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(103853888)))]; |
|
tensor<fp16, [1024]> var_766_to_fp16 = const()[name = string("op_766_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(105951104)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_39_cast_fp16 = linear(bias = var_766_to_fp16, weight = var_765_to_fp16, x = audio_data)[name = string("linear_39_cast_fp16")]; |
|
tensor<int32, [3]> var_768_shape_cast_fp16 = shape(x = linear_38_cast_fp16)[name = string("op_768_shape_cast_fp16")]; |
|
int32 gather_38_axis_0 = const()[name = string("gather_38_axis_0"), val = int32(0)]; |
|
int32 gather_38_batch_dims_0 = const()[name = string("gather_38_batch_dims_0"), val = int32(0)]; |
|
bool gather_38_validate_indices_0 = const()[name = string("gather_38_validate_indices_0"), val = bool(false)]; |
|
string var_768_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_768_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_38_to_uint16 = const()[name = string("select_38_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_768_shape_cast_fp16_to_uint16 = cast(dtype = var_768_shape_cast_fp16_to_uint16_dtype_0, x = var_768_shape_cast_fp16)[name = string("cast_75")]; |
|
uint16 gather_38_cast_uint16 = gather(axis = gather_38_axis_0, batch_dims = gather_38_batch_dims_0, indices = select_38_to_uint16, validate_indices = gather_38_validate_indices_0, x = var_768_shape_cast_fp16_to_uint16)[name = string("gather_38_cast_uint16")]; |
|
string gather_38_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_38_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_163_axes_0 = const()[name = string("expand_dims_163_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_38_cast_uint16_to_int32 = cast(dtype = gather_38_cast_uint16_to_int32_dtype_0, x = gather_38_cast_uint16)[name = string("cast_74")]; |
|
tensor<int32, [1]> expand_dims_163 = expand_dims(axes = expand_dims_163_axes_0, x = gather_38_cast_uint16_to_int32)[name = string("expand_dims_163")]; |
|
tensor<int32, [4]> concat_119 = const()[name = string("concat_119"), val = tensor<int32, [4]>([19, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_120_values0_0 = const()[name = string("concat_120_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_120_values1_0 = const()[name = string("concat_120_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_120_values3_0 = const()[name = string("concat_120_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_120_axis_0 = const()[name = string("concat_120_axis_0"), val = int32(0)]; |
|
bool concat_120_interleave_0 = const()[name = string("concat_120_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_120 = concat(axis = concat_120_axis_0, interleave = concat_120_interleave_0, values = (concat_120_values0_0, concat_120_values1_0, expand_dims_163, concat_120_values3_0))[name = string("concat_120")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_20_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_20_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_20_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_20_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_20_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_20_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_20_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_20_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_20_cast_fp16 = slice_update(begin = concat_119, begin_mask = k_cache2_internal_tensor_assign_20_begin_mask_0, end = concat_120, end_mask = k_cache2_internal_tensor_assign_20_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_20_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_20_stride_0, update = linear_38_cast_fp16, x = coreml_update_state_88)[name = string("k_cache2_internal_tensor_assign_20_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_20_cast_fp16, input = k_cache2)[name = string("coreml_update_state_90_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_90 = read_state(input = k_cache2)[name = string("coreml_update_state_90")]; |
|
tensor<int32, [3]> var_773_shape_cast_fp16 = shape(x = linear_39_cast_fp16)[name = string("op_773_shape_cast_fp16")]; |
|
int32 gather_39_axis_0 = const()[name = string("gather_39_axis_0"), val = int32(0)]; |
|
int32 gather_39_batch_dims_0 = const()[name = string("gather_39_batch_dims_0"), val = int32(0)]; |
|
bool gather_39_validate_indices_0 = const()[name = string("gather_39_validate_indices_0"), val = bool(false)]; |
|
string var_773_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_773_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_39_to_uint16 = const()[name = string("select_39_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_773_shape_cast_fp16_to_uint16 = cast(dtype = var_773_shape_cast_fp16_to_uint16_dtype_0, x = var_773_shape_cast_fp16)[name = string("cast_73")]; |
|
uint16 gather_39_cast_uint16 = gather(axis = gather_39_axis_0, batch_dims = gather_39_batch_dims_0, indices = select_39_to_uint16, validate_indices = gather_39_validate_indices_0, x = var_773_shape_cast_fp16_to_uint16)[name = string("gather_39_cast_uint16")]; |
|
string gather_39_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_39_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_167_axes_0 = const()[name = string("expand_dims_167_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_39_cast_uint16_to_int32 = cast(dtype = gather_39_cast_uint16_to_int32_dtype_0, x = gather_39_cast_uint16)[name = string("cast_72")]; |
|
tensor<int32, [1]> expand_dims_167 = expand_dims(axes = expand_dims_167_axes_0, x = gather_39_cast_uint16_to_int32)[name = string("expand_dims_167")]; |
|
tensor<int32, [4]> concat_122 = const()[name = string("concat_122"), val = tensor<int32, [4]>([19, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_123_values0_0 = const()[name = string("concat_123_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_123_values1_0 = const()[name = string("concat_123_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_123_values3_0 = const()[name = string("concat_123_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_123_axis_0 = const()[name = string("concat_123_axis_0"), val = int32(0)]; |
|
bool concat_123_interleave_0 = const()[name = string("concat_123_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_123 = concat(axis = concat_123_axis_0, interleave = concat_123_interleave_0, values = (concat_123_values0_0, concat_123_values1_0, expand_dims_167, concat_123_values3_0))[name = string("concat_123")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_20_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_20_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_20_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_20_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_20_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_20_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_20_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_20_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_20_cast_fp16 = slice_update(begin = concat_122, begin_mask = v_cache2_internal_tensor_assign_20_begin_mask_0, end = concat_123, end_mask = v_cache2_internal_tensor_assign_20_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_20_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_20_stride_0, update = linear_39_cast_fp16, x = coreml_update_state_89)[name = string("v_cache2_internal_tensor_assign_20_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_20_cast_fp16, input = v_cache2)[name = string("coreml_update_state_91_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_91 = read_state(input = v_cache2)[name = string("coreml_update_state_91")]; |
|
tensor<fp16, [1024, 1024]> var_795_to_fp16 = const()[name = string("op_795_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(105953216)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_40_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_795_to_fp16, x = audio_data)[name = string("linear_40_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_799_to_fp16 = const()[name = string("op_799_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(108050432)))]; |
|
tensor<fp16, [1024]> var_800_to_fp16 = const()[name = string("op_800_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(110147648)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_41_cast_fp16 = linear(bias = var_800_to_fp16, weight = var_799_to_fp16, x = audio_data)[name = string("linear_41_cast_fp16")]; |
|
tensor<int32, [3]> var_802_shape_cast_fp16 = shape(x = linear_40_cast_fp16)[name = string("op_802_shape_cast_fp16")]; |
|
int32 gather_40_axis_0 = const()[name = string("gather_40_axis_0"), val = int32(0)]; |
|
int32 gather_40_batch_dims_0 = const()[name = string("gather_40_batch_dims_0"), val = int32(0)]; |
|
bool gather_40_validate_indices_0 = const()[name = string("gather_40_validate_indices_0"), val = bool(false)]; |
|
string var_802_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_802_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_40_to_uint16 = const()[name = string("select_40_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_802_shape_cast_fp16_to_uint16 = cast(dtype = var_802_shape_cast_fp16_to_uint16_dtype_0, x = var_802_shape_cast_fp16)[name = string("cast_71")]; |
|
uint16 gather_40_cast_uint16 = gather(axis = gather_40_axis_0, batch_dims = gather_40_batch_dims_0, indices = select_40_to_uint16, validate_indices = gather_40_validate_indices_0, x = var_802_shape_cast_fp16_to_uint16)[name = string("gather_40_cast_uint16")]; |
|
string gather_40_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_40_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_171_axes_0 = const()[name = string("expand_dims_171_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_40_cast_uint16_to_int32 = cast(dtype = gather_40_cast_uint16_to_int32_dtype_0, x = gather_40_cast_uint16)[name = string("cast_70")]; |
|
tensor<int32, [1]> expand_dims_171 = expand_dims(axes = expand_dims_171_axes_0, x = gather_40_cast_uint16_to_int32)[name = string("expand_dims_171")]; |
|
tensor<int32, [4]> concat_125 = const()[name = string("concat_125"), val = tensor<int32, [4]>([20, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_126_values0_0 = const()[name = string("concat_126_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_126_values1_0 = const()[name = string("concat_126_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_126_values3_0 = const()[name = string("concat_126_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_126_axis_0 = const()[name = string("concat_126_axis_0"), val = int32(0)]; |
|
bool concat_126_interleave_0 = const()[name = string("concat_126_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_126 = concat(axis = concat_126_axis_0, interleave = concat_126_interleave_0, values = (concat_126_values0_0, concat_126_values1_0, expand_dims_171, concat_126_values3_0))[name = string("concat_126")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_21_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_21_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_21_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_21_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_21_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_21_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_21_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_21_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_21_cast_fp16 = slice_update(begin = concat_125, begin_mask = k_cache2_internal_tensor_assign_21_begin_mask_0, end = concat_126, end_mask = k_cache2_internal_tensor_assign_21_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_21_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_21_stride_0, update = linear_40_cast_fp16, x = coreml_update_state_90)[name = string("k_cache2_internal_tensor_assign_21_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_21_cast_fp16, input = k_cache2)[name = string("coreml_update_state_92_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_92 = read_state(input = k_cache2)[name = string("coreml_update_state_92")]; |
|
tensor<int32, [3]> var_807_shape_cast_fp16 = shape(x = linear_41_cast_fp16)[name = string("op_807_shape_cast_fp16")]; |
|
int32 gather_41_axis_0 = const()[name = string("gather_41_axis_0"), val = int32(0)]; |
|
int32 gather_41_batch_dims_0 = const()[name = string("gather_41_batch_dims_0"), val = int32(0)]; |
|
bool gather_41_validate_indices_0 = const()[name = string("gather_41_validate_indices_0"), val = bool(false)]; |
|
string var_807_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_807_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_41_to_uint16 = const()[name = string("select_41_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_807_shape_cast_fp16_to_uint16 = cast(dtype = var_807_shape_cast_fp16_to_uint16_dtype_0, x = var_807_shape_cast_fp16)[name = string("cast_69")]; |
|
uint16 gather_41_cast_uint16 = gather(axis = gather_41_axis_0, batch_dims = gather_41_batch_dims_0, indices = select_41_to_uint16, validate_indices = gather_41_validate_indices_0, x = var_807_shape_cast_fp16_to_uint16)[name = string("gather_41_cast_uint16")]; |
|
string gather_41_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_41_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_175_axes_0 = const()[name = string("expand_dims_175_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_41_cast_uint16_to_int32 = cast(dtype = gather_41_cast_uint16_to_int32_dtype_0, x = gather_41_cast_uint16)[name = string("cast_68")]; |
|
tensor<int32, [1]> expand_dims_175 = expand_dims(axes = expand_dims_175_axes_0, x = gather_41_cast_uint16_to_int32)[name = string("expand_dims_175")]; |
|
tensor<int32, [4]> concat_128 = const()[name = string("concat_128"), val = tensor<int32, [4]>([20, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_129_values0_0 = const()[name = string("concat_129_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_129_values1_0 = const()[name = string("concat_129_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_129_values3_0 = const()[name = string("concat_129_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_129_axis_0 = const()[name = string("concat_129_axis_0"), val = int32(0)]; |
|
bool concat_129_interleave_0 = const()[name = string("concat_129_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_129 = concat(axis = concat_129_axis_0, interleave = concat_129_interleave_0, values = (concat_129_values0_0, concat_129_values1_0, expand_dims_175, concat_129_values3_0))[name = string("concat_129")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_21_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_21_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_21_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_21_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_21_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_21_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_21_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_21_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_21_cast_fp16 = slice_update(begin = concat_128, begin_mask = v_cache2_internal_tensor_assign_21_begin_mask_0, end = concat_129, end_mask = v_cache2_internal_tensor_assign_21_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_21_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_21_stride_0, update = linear_41_cast_fp16, x = coreml_update_state_91)[name = string("v_cache2_internal_tensor_assign_21_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_21_cast_fp16, input = v_cache2)[name = string("coreml_update_state_93_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_93 = read_state(input = v_cache2)[name = string("coreml_update_state_93")]; |
|
tensor<fp16, [1024, 1024]> var_829_to_fp16 = const()[name = string("op_829_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(110149760)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_42_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_829_to_fp16, x = audio_data)[name = string("linear_42_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_833_to_fp16 = const()[name = string("op_833_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(112246976)))]; |
|
tensor<fp16, [1024]> var_834_to_fp16 = const()[name = string("op_834_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(114344192)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_43_cast_fp16 = linear(bias = var_834_to_fp16, weight = var_833_to_fp16, x = audio_data)[name = string("linear_43_cast_fp16")]; |
|
tensor<int32, [3]> var_836_shape_cast_fp16 = shape(x = linear_42_cast_fp16)[name = string("op_836_shape_cast_fp16")]; |
|
int32 gather_42_axis_0 = const()[name = string("gather_42_axis_0"), val = int32(0)]; |
|
int32 gather_42_batch_dims_0 = const()[name = string("gather_42_batch_dims_0"), val = int32(0)]; |
|
bool gather_42_validate_indices_0 = const()[name = string("gather_42_validate_indices_0"), val = bool(false)]; |
|
string var_836_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_836_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_42_to_uint16 = const()[name = string("select_42_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_836_shape_cast_fp16_to_uint16 = cast(dtype = var_836_shape_cast_fp16_to_uint16_dtype_0, x = var_836_shape_cast_fp16)[name = string("cast_67")]; |
|
uint16 gather_42_cast_uint16 = gather(axis = gather_42_axis_0, batch_dims = gather_42_batch_dims_0, indices = select_42_to_uint16, validate_indices = gather_42_validate_indices_0, x = var_836_shape_cast_fp16_to_uint16)[name = string("gather_42_cast_uint16")]; |
|
string gather_42_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_42_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_179_axes_0 = const()[name = string("expand_dims_179_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_42_cast_uint16_to_int32 = cast(dtype = gather_42_cast_uint16_to_int32_dtype_0, x = gather_42_cast_uint16)[name = string("cast_66")]; |
|
tensor<int32, [1]> expand_dims_179 = expand_dims(axes = expand_dims_179_axes_0, x = gather_42_cast_uint16_to_int32)[name = string("expand_dims_179")]; |
|
tensor<int32, [4]> concat_131 = const()[name = string("concat_131"), val = tensor<int32, [4]>([21, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_132_values0_0 = const()[name = string("concat_132_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_132_values1_0 = const()[name = string("concat_132_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_132_values3_0 = const()[name = string("concat_132_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_132_axis_0 = const()[name = string("concat_132_axis_0"), val = int32(0)]; |
|
bool concat_132_interleave_0 = const()[name = string("concat_132_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_132 = concat(axis = concat_132_axis_0, interleave = concat_132_interleave_0, values = (concat_132_values0_0, concat_132_values1_0, expand_dims_179, concat_132_values3_0))[name = string("concat_132")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_22_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_22_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_22_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_22_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_22_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_22_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_22_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_22_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_22_cast_fp16 = slice_update(begin = concat_131, begin_mask = k_cache2_internal_tensor_assign_22_begin_mask_0, end = concat_132, end_mask = k_cache2_internal_tensor_assign_22_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_22_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_22_stride_0, update = linear_42_cast_fp16, x = coreml_update_state_92)[name = string("k_cache2_internal_tensor_assign_22_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_22_cast_fp16, input = k_cache2)[name = string("coreml_update_state_94_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_94 = read_state(input = k_cache2)[name = string("coreml_update_state_94")]; |
|
tensor<int32, [3]> var_841_shape_cast_fp16 = shape(x = linear_43_cast_fp16)[name = string("op_841_shape_cast_fp16")]; |
|
int32 gather_43_axis_0 = const()[name = string("gather_43_axis_0"), val = int32(0)]; |
|
int32 gather_43_batch_dims_0 = const()[name = string("gather_43_batch_dims_0"), val = int32(0)]; |
|
bool gather_43_validate_indices_0 = const()[name = string("gather_43_validate_indices_0"), val = bool(false)]; |
|
string var_841_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_841_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_43_to_uint16 = const()[name = string("select_43_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_841_shape_cast_fp16_to_uint16 = cast(dtype = var_841_shape_cast_fp16_to_uint16_dtype_0, x = var_841_shape_cast_fp16)[name = string("cast_65")]; |
|
uint16 gather_43_cast_uint16 = gather(axis = gather_43_axis_0, batch_dims = gather_43_batch_dims_0, indices = select_43_to_uint16, validate_indices = gather_43_validate_indices_0, x = var_841_shape_cast_fp16_to_uint16)[name = string("gather_43_cast_uint16")]; |
|
string gather_43_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_43_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_183_axes_0 = const()[name = string("expand_dims_183_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_43_cast_uint16_to_int32 = cast(dtype = gather_43_cast_uint16_to_int32_dtype_0, x = gather_43_cast_uint16)[name = string("cast_64")]; |
|
tensor<int32, [1]> expand_dims_183 = expand_dims(axes = expand_dims_183_axes_0, x = gather_43_cast_uint16_to_int32)[name = string("expand_dims_183")]; |
|
tensor<int32, [4]> concat_134 = const()[name = string("concat_134"), val = tensor<int32, [4]>([21, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_135_values0_0 = const()[name = string("concat_135_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_135_values1_0 = const()[name = string("concat_135_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_135_values3_0 = const()[name = string("concat_135_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_135_axis_0 = const()[name = string("concat_135_axis_0"), val = int32(0)]; |
|
bool concat_135_interleave_0 = const()[name = string("concat_135_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_135 = concat(axis = concat_135_axis_0, interleave = concat_135_interleave_0, values = (concat_135_values0_0, concat_135_values1_0, expand_dims_183, concat_135_values3_0))[name = string("concat_135")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_22_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_22_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_22_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_22_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_22_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_22_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_22_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_22_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_22_cast_fp16 = slice_update(begin = concat_134, begin_mask = v_cache2_internal_tensor_assign_22_begin_mask_0, end = concat_135, end_mask = v_cache2_internal_tensor_assign_22_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_22_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_22_stride_0, update = linear_43_cast_fp16, x = coreml_update_state_93)[name = string("v_cache2_internal_tensor_assign_22_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_22_cast_fp16, input = v_cache2)[name = string("coreml_update_state_95_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_95 = read_state(input = v_cache2)[name = string("coreml_update_state_95")]; |
|
tensor<fp16, [1024, 1024]> var_863_to_fp16 = const()[name = string("op_863_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(114346304)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_44_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_863_to_fp16, x = audio_data)[name = string("linear_44_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_867_to_fp16 = const()[name = string("op_867_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(116443520)))]; |
|
tensor<fp16, [1024]> var_868_to_fp16 = const()[name = string("op_868_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(118540736)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_45_cast_fp16 = linear(bias = var_868_to_fp16, weight = var_867_to_fp16, x = audio_data)[name = string("linear_45_cast_fp16")]; |
|
tensor<int32, [3]> var_870_shape_cast_fp16 = shape(x = linear_44_cast_fp16)[name = string("op_870_shape_cast_fp16")]; |
|
int32 gather_44_axis_0 = const()[name = string("gather_44_axis_0"), val = int32(0)]; |
|
int32 gather_44_batch_dims_0 = const()[name = string("gather_44_batch_dims_0"), val = int32(0)]; |
|
bool gather_44_validate_indices_0 = const()[name = string("gather_44_validate_indices_0"), val = bool(false)]; |
|
string var_870_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_870_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_44_to_uint16 = const()[name = string("select_44_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_870_shape_cast_fp16_to_uint16 = cast(dtype = var_870_shape_cast_fp16_to_uint16_dtype_0, x = var_870_shape_cast_fp16)[name = string("cast_63")]; |
|
uint16 gather_44_cast_uint16 = gather(axis = gather_44_axis_0, batch_dims = gather_44_batch_dims_0, indices = select_44_to_uint16, validate_indices = gather_44_validate_indices_0, x = var_870_shape_cast_fp16_to_uint16)[name = string("gather_44_cast_uint16")]; |
|
string gather_44_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_44_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_187_axes_0 = const()[name = string("expand_dims_187_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_44_cast_uint16_to_int32 = cast(dtype = gather_44_cast_uint16_to_int32_dtype_0, x = gather_44_cast_uint16)[name = string("cast_62")]; |
|
tensor<int32, [1]> expand_dims_187 = expand_dims(axes = expand_dims_187_axes_0, x = gather_44_cast_uint16_to_int32)[name = string("expand_dims_187")]; |
|
tensor<int32, [4]> concat_137 = const()[name = string("concat_137"), val = tensor<int32, [4]>([22, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_138_values0_0 = const()[name = string("concat_138_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_138_values1_0 = const()[name = string("concat_138_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_138_values3_0 = const()[name = string("concat_138_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_138_axis_0 = const()[name = string("concat_138_axis_0"), val = int32(0)]; |
|
bool concat_138_interleave_0 = const()[name = string("concat_138_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_138 = concat(axis = concat_138_axis_0, interleave = concat_138_interleave_0, values = (concat_138_values0_0, concat_138_values1_0, expand_dims_187, concat_138_values3_0))[name = string("concat_138")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_23_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_23_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_23_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_23_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_23_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_23_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_23_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_23_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_23_cast_fp16 = slice_update(begin = concat_137, begin_mask = k_cache2_internal_tensor_assign_23_begin_mask_0, end = concat_138, end_mask = k_cache2_internal_tensor_assign_23_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_23_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_23_stride_0, update = linear_44_cast_fp16, x = coreml_update_state_94)[name = string("k_cache2_internal_tensor_assign_23_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_23_cast_fp16, input = k_cache2)[name = string("coreml_update_state_96_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_96 = read_state(input = k_cache2)[name = string("coreml_update_state_96")]; |
|
tensor<int32, [3]> var_875_shape_cast_fp16 = shape(x = linear_45_cast_fp16)[name = string("op_875_shape_cast_fp16")]; |
|
int32 gather_45_axis_0 = const()[name = string("gather_45_axis_0"), val = int32(0)]; |
|
int32 gather_45_batch_dims_0 = const()[name = string("gather_45_batch_dims_0"), val = int32(0)]; |
|
bool gather_45_validate_indices_0 = const()[name = string("gather_45_validate_indices_0"), val = bool(false)]; |
|
string var_875_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_875_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_45_to_uint16 = const()[name = string("select_45_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_875_shape_cast_fp16_to_uint16 = cast(dtype = var_875_shape_cast_fp16_to_uint16_dtype_0, x = var_875_shape_cast_fp16)[name = string("cast_61")]; |
|
uint16 gather_45_cast_uint16 = gather(axis = gather_45_axis_0, batch_dims = gather_45_batch_dims_0, indices = select_45_to_uint16, validate_indices = gather_45_validate_indices_0, x = var_875_shape_cast_fp16_to_uint16)[name = string("gather_45_cast_uint16")]; |
|
string gather_45_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_45_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_191_axes_0 = const()[name = string("expand_dims_191_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_45_cast_uint16_to_int32 = cast(dtype = gather_45_cast_uint16_to_int32_dtype_0, x = gather_45_cast_uint16)[name = string("cast_60")]; |
|
tensor<int32, [1]> expand_dims_191 = expand_dims(axes = expand_dims_191_axes_0, x = gather_45_cast_uint16_to_int32)[name = string("expand_dims_191")]; |
|
tensor<int32, [4]> concat_140 = const()[name = string("concat_140"), val = tensor<int32, [4]>([22, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_141_values0_0 = const()[name = string("concat_141_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_141_values1_0 = const()[name = string("concat_141_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_141_values3_0 = const()[name = string("concat_141_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_141_axis_0 = const()[name = string("concat_141_axis_0"), val = int32(0)]; |
|
bool concat_141_interleave_0 = const()[name = string("concat_141_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_141 = concat(axis = concat_141_axis_0, interleave = concat_141_interleave_0, values = (concat_141_values0_0, concat_141_values1_0, expand_dims_191, concat_141_values3_0))[name = string("concat_141")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_23_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_23_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_23_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_23_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_23_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_23_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_23_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_23_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_23_cast_fp16 = slice_update(begin = concat_140, begin_mask = v_cache2_internal_tensor_assign_23_begin_mask_0, end = concat_141, end_mask = v_cache2_internal_tensor_assign_23_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_23_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_23_stride_0, update = linear_45_cast_fp16, x = coreml_update_state_95)[name = string("v_cache2_internal_tensor_assign_23_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_23_cast_fp16, input = v_cache2)[name = string("coreml_update_state_97_write_state")]; |
|
tensor<fp16, [24, 1, 1500, 1024]> coreml_update_state_97 = read_state(input = v_cache2)[name = string("coreml_update_state_97")]; |
|
tensor<fp16, [1024, 1024]> var_897_to_fp16 = const()[name = string("op_897_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(118542848)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_46_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_897_to_fp16, x = audio_data)[name = string("linear_46_cast_fp16")]; |
|
tensor<fp16, [1024, 1024]> var_901_to_fp16 = const()[name = string("op_901_to_fp16"), val = tensor<fp16, [1024, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(120640064)))]; |
|
tensor<fp16, [1024]> var_902_to_fp16 = const()[name = string("op_902_to_fp16"), val = tensor<fp16, [1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(122737280)))]; |
|
tensor<fp16, [1, ?, 1024]> linear_47_cast_fp16 = linear(bias = var_902_to_fp16, weight = var_901_to_fp16, x = audio_data)[name = string("linear_47_cast_fp16")]; |
|
tensor<int32, [3]> var_904_shape_cast_fp16 = shape(x = linear_46_cast_fp16)[name = string("op_904_shape_cast_fp16")]; |
|
int32 gather_46_axis_0 = const()[name = string("gather_46_axis_0"), val = int32(0)]; |
|
int32 gather_46_batch_dims_0 = const()[name = string("gather_46_batch_dims_0"), val = int32(0)]; |
|
bool gather_46_validate_indices_0 = const()[name = string("gather_46_validate_indices_0"), val = bool(false)]; |
|
string var_904_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_904_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_46_to_uint16 = const()[name = string("select_46_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_904_shape_cast_fp16_to_uint16 = cast(dtype = var_904_shape_cast_fp16_to_uint16_dtype_0, x = var_904_shape_cast_fp16)[name = string("cast_59")]; |
|
uint16 gather_46_cast_uint16 = gather(axis = gather_46_axis_0, batch_dims = gather_46_batch_dims_0, indices = select_46_to_uint16, validate_indices = gather_46_validate_indices_0, x = var_904_shape_cast_fp16_to_uint16)[name = string("gather_46_cast_uint16")]; |
|
string gather_46_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_46_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_195_axes_0 = const()[name = string("expand_dims_195_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_46_cast_uint16_to_int32 = cast(dtype = gather_46_cast_uint16_to_int32_dtype_0, x = gather_46_cast_uint16)[name = string("cast_58")]; |
|
tensor<int32, [1]> expand_dims_195 = expand_dims(axes = expand_dims_195_axes_0, x = gather_46_cast_uint16_to_int32)[name = string("expand_dims_195")]; |
|
tensor<int32, [4]> concat_143 = const()[name = string("concat_143"), val = tensor<int32, [4]>([23, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_144_values0_0 = const()[name = string("concat_144_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_144_values1_0 = const()[name = string("concat_144_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_144_values3_0 = const()[name = string("concat_144_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_144_axis_0 = const()[name = string("concat_144_axis_0"), val = int32(0)]; |
|
bool concat_144_interleave_0 = const()[name = string("concat_144_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_144 = concat(axis = concat_144_axis_0, interleave = concat_144_interleave_0, values = (concat_144_values0_0, concat_144_values1_0, expand_dims_195, concat_144_values3_0))[name = string("concat_144")]; |
|
tensor<int32, [4]> k_cache2_internal_tensor_assign_24_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_24_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_24_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_24_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_24_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_24_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> k_cache2_internal_tensor_assign_24_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_24_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> k_cache2_internal_tensor_assign_24_cast_fp16 = slice_update(begin = concat_143, begin_mask = k_cache2_internal_tensor_assign_24_begin_mask_0, end = concat_144, end_mask = k_cache2_internal_tensor_assign_24_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_24_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_24_stride_0, update = linear_46_cast_fp16, x = coreml_update_state_96)[name = string("k_cache2_internal_tensor_assign_24_cast_fp16")]; |
|
write_state(data = k_cache2_internal_tensor_assign_24_cast_fp16, input = k_cache2)[name = string("coreml_update_state_98_write_state")]; |
|
tensor<int32, [3]> var_909_shape_cast_fp16 = shape(x = linear_47_cast_fp16)[name = string("op_909_shape_cast_fp16")]; |
|
int32 gather_47_axis_0 = const()[name = string("gather_47_axis_0"), val = int32(0)]; |
|
int32 gather_47_batch_dims_0 = const()[name = string("gather_47_batch_dims_0"), val = int32(0)]; |
|
bool gather_47_validate_indices_0 = const()[name = string("gather_47_validate_indices_0"), val = bool(false)]; |
|
string var_909_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_909_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")]; |
|
uint16 select_47_to_uint16 = const()[name = string("select_47_to_uint16"), val = uint16(1)]; |
|
tensor<uint16, [3]> var_909_shape_cast_fp16_to_uint16 = cast(dtype = var_909_shape_cast_fp16_to_uint16_dtype_0, x = var_909_shape_cast_fp16)[name = string("cast_57")]; |
|
uint16 gather_47_cast_uint16 = gather(axis = gather_47_axis_0, batch_dims = gather_47_batch_dims_0, indices = select_47_to_uint16, validate_indices = gather_47_validate_indices_0, x = var_909_shape_cast_fp16_to_uint16)[name = string("gather_47_cast_uint16")]; |
|
string gather_47_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_47_cast_uint16_to_int32_dtype_0"), val = string("int32")]; |
|
tensor<int32, [1]> expand_dims_199_axes_0 = const()[name = string("expand_dims_199_axes_0"), val = tensor<int32, [1]>([0])]; |
|
int32 gather_47_cast_uint16_to_int32 = cast(dtype = gather_47_cast_uint16_to_int32_dtype_0, x = gather_47_cast_uint16)[name = string("cast_56")]; |
|
tensor<int32, [1]> expand_dims_199 = expand_dims(axes = expand_dims_199_axes_0, x = gather_47_cast_uint16_to_int32)[name = string("expand_dims_199")]; |
|
tensor<int32, [4]> concat_146 = const()[name = string("concat_146"), val = tensor<int32, [4]>([23, 0, 0, 0])]; |
|
tensor<int32, [1]> concat_147_values0_0 = const()[name = string("concat_147_values0_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_147_values1_0 = const()[name = string("concat_147_values1_0"), val = tensor<int32, [1]>([0])]; |
|
tensor<int32, [1]> concat_147_values3_0 = const()[name = string("concat_147_values3_0"), val = tensor<int32, [1]>([0])]; |
|
int32 concat_147_axis_0 = const()[name = string("concat_147_axis_0"), val = int32(0)]; |
|
bool concat_147_interleave_0 = const()[name = string("concat_147_interleave_0"), val = bool(false)]; |
|
tensor<int32, [4]> concat_147 = concat(axis = concat_147_axis_0, interleave = concat_147_interleave_0, values = (concat_147_values0_0, concat_147_values1_0, expand_dims_199, concat_147_values3_0))[name = string("concat_147")]; |
|
tensor<int32, [4]> v_cache2_internal_tensor_assign_24_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_24_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_24_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_24_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_24_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_24_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])]; |
|
tensor<bool, [4]> v_cache2_internal_tensor_assign_24_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_24_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])]; |
|
tensor<fp16, [24, 1, 1500, 1024]> v_cache2_internal_tensor_assign_24_cast_fp16 = slice_update(begin = concat_146, begin_mask = v_cache2_internal_tensor_assign_24_begin_mask_0, end = concat_147, end_mask = v_cache2_internal_tensor_assign_24_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_24_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_24_stride_0, update = linear_47_cast_fp16, x = coreml_update_state_97)[name = string("v_cache2_internal_tensor_assign_24_cast_fp16")]; |
|
write_state(data = v_cache2_internal_tensor_assign_24_cast_fp16, input = v_cache2)[name = string("coreml_update_state_99_write_state")]; |
|
} -> (dummy); |
|
} |
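
The listing above is the encoder-side half of the stateful function: for each of the 24 cross-attention layers it projects audio_data through a key and a value linear layer and writes the result into the k_cache2 / v_cache2 state tensors with a slice_update followed by write_state, so later decoder steps can read the cached keys and values instead of recomputing them. As a minimal usage sketch of how such a stateful mlprogram could be driven from coremltools 8 (the package name, input values, and surrounding pipeline below are illustrative assumptions, not taken from this file):

# Minimal sketch: load the stateful mlprogram and prime its KV-cache states
# with a single prediction. Path, shapes, and data are assumptions for
# illustration only.
import numpy as np
import coremltools as ct

model = ct.models.MLModel("WhisperDecoder.mlpackage")  # hypothetical path

# make_state() allocates the k_cache1/k_cache2/v_cache1/v_cache2 buffers
# declared in the function signature (requires coremltools >= 8.0 and an
# iOS 18 / macOS 15 deployment target).
kv_state = model.make_state()

# audio_data is flexible along the sequence axis: [1, 1..1500, 1024], fp16.
audio_data = np.zeros((1, 1500, 1024), dtype=np.float16)

# One call executes every slice_update / write_state pair above, filling the
# cross-attention caches in place; the returned "dummy" output can be ignored.
out = model.predict({"audio_data": audio_data}, state=kv_state)

The same kv_state object would then be passed to subsequent decoder predictions, which is the point of the pattern: the per-layer keys and values for a given audio window are computed once here and reused on every decoding step.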