|
program(1.0) |
|
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})] |
|
{ |
|
func main<ios16>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, tensor<fp16, [1, 1280, 1, 1500]> encoder_output_embeds, tensor<int32, [1]> input_ids, tensor<fp16, [1, 2560, 1, 448]> key_cache, tensor<fp16, [1, 448]> kv_cache_update_mask, tensor<fp16, [1, 2560, 1, 448]> value_cache) { |
|
tensor<int32, []> var_20_axis_0 = const()[name = tensor<string, []>("op_20_axis_0"), val = tensor<int32, []>(0)]; |
|
tensor<int32, []> var_20_batch_dims_0 = const()[name = tensor<string, []>("op_20_batch_dims_0"), val = tensor<int32, []>(0)]; |
|
tensor<fp16, [51866, 1280]> embed_tokens_weight_to_fp16 = const()[name = tensor<string, []>("embed_tokens_weight_to_fp16"), val = tensor<fp16, [51866, 1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))]; |
|
tensor<fp16, [1, 1280]> var_20_cast_fp16 = gather(axis = var_20_axis_0, batch_dims = var_20_batch_dims_0, indices = input_ids, x = embed_tokens_weight_to_fp16)[name = tensor<string, []>("op_20_cast_fp16")]; |
|
tensor<int32, []> var_24_axis_0 = const()[name = tensor<string, []>("op_24_axis_0"), val = tensor<int32, []>(0)]; |
|
tensor<int32, []> var_24_batch_dims_0 = const()[name = tensor<string, []>("op_24_batch_dims_0"), val = tensor<int32, []>(0)]; |
|
tensor<fp16, [448, 1280]> embed_positions_weight_to_fp16 = const()[name = tensor<string, []>("embed_positions_weight_to_fp16"), val = tensor<fp16, [448, 1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132777088)))]; |
|
tensor<fp16, [1, 1280]> var_24_cast_fp16 = gather(axis = var_24_axis_0, batch_dims = var_24_batch_dims_0, indices = cache_length, x = embed_positions_weight_to_fp16)[name = tensor<string, []>("op_24_cast_fp16")]; |
|
tensor<fp16, [1, 1280]> hidden_states_1_cast_fp16 = add(x = var_20_cast_fp16, y = var_24_cast_fp16)[name = tensor<string, []>("hidden_states_1_cast_fp16")]; |
|
tensor<int32, [1]> var_38_axes_0 = const()[name = tensor<string, []>("op_38_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1280, 1]> var_38_cast_fp16 = expand_dims(axes = var_38_axes_0, x = hidden_states_1_cast_fp16)[name = tensor<string, []>("op_38_cast_fp16")]; |
|
tensor<int32, [1]> inputs_1_axes_0 = const()[name = tensor<string, []>("inputs_1_axes_0"), val = tensor<int32, [1]>([3])]; |
|
tensor<fp16, [1, 1280, 1, 1]> inputs_1_cast_fp16 = expand_dims(axes = inputs_1_axes_0, x = var_38_cast_fp16)[name = tensor<string, []>("inputs_1_cast_fp16")]; |
|
tensor<int32, [2]> tile_0 = const()[name = tensor<string, []>("tile_0"), val = tensor<int32, [2]>([1280, 1280])]; |
|
tensor<int32, []> var_43_axis_0 = const()[name = tensor<string, []>("op_43_axis_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_43_cast_fp16_0, tensor<fp16, [1, 1280, 1, 448]> var_43_cast_fp16_1 = split(axis = var_43_axis_0, split_sizes = tile_0, x = key_cache)[name = tensor<string, []>("op_43_cast_fp16")]; |
|
tensor<int32, [2]> tile_1 = const()[name = tensor<string, []>("tile_1"), val = tensor<int32, [2]>([1280, 1280])]; |
|
tensor<int32, []> var_48_axis_0 = const()[name = tensor<string, []>("op_48_axis_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_48_cast_fp16_0, tensor<fp16, [1, 1280, 1, 448]> var_48_cast_fp16_1 = split(axis = var_48_axis_0, split_sizes = tile_1, x = value_cache)[name = tensor<string, []>("op_48_cast_fp16")]; |
|
tensor<int32, []> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<int32, []>(3)]; |
|
tensor<int32, [1]> out_1_axes_0 = const()[name = tensor<string, []>("out_1_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_82_to_fp16 = const()[name = tensor<string, []>("op_82_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> out_1_cast_fp16 = layer_norm(axes = out_1_axes_0, epsilon = var_82_to_fp16, x = inputs_1_cast_fp16)[name = tensor<string, []>("out_1_cast_fp16")]; |
|
tensor<fp16, [1280]> obj_1_mean_0_to_fp16 = const()[name = tensor<string, []>("obj_1_mean_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133924032)))]; |
|
tensor<fp16, [1280]> obj_1_variance_0_to_fp16 = const()[name = tensor<string, []>("obj_1_variance_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133926656)))]; |
|
tensor<fp16, [1280]> obj_1_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_1_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133929280)))]; |
|
tensor<fp16, [1280]> obj_1_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_1_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133931904)))]; |
|
tensor<fp16, []> obj_1_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_1_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> obj_1_cast_fp16 = batch_norm(beta = obj_1_beta_0_to_fp16, epsilon = obj_1_epsilon_0_to_fp16, gamma = obj_1_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_1_cast_fp16)[name = tensor<string, []>("obj_1_cast_fp16")]; |
|
tensor<string, []> query_1_pad_type_0 = const()[name = tensor<string, []>("query_1_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_1_strides_0 = const()[name = tensor<string, []>("query_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_1_pad_0 = const()[name = tensor<string, []>("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_1_dilations_0 = const()[name = tensor<string, []>("query_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_1_groups_0 = const()[name = tensor<string, []>("query_1_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133934528)))]; |
|
tensor<fp16, [1280]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(137211392)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("query_1_cast_fp16")]; |
|
tensor<string, []> current_key_1_pad_type_0 = const()[name = tensor<string, []>("current_key_1_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_key_1_strides_0 = const()[name = tensor<string, []>("current_key_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_1_pad_0 = const()[name = tensor<string, []>("current_key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_1_dilations_0 = const()[name = tensor<string, []>("current_key_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_key_1_groups_0 = const()[name = tensor<string, []>("current_key_1_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(137214016)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> current_key_1_cast_fp16 = conv(dilations = current_key_1_dilations_0, groups = current_key_1_groups_0, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = current_key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("current_key_1_cast_fp16")]; |
|
tensor<string, []> current_value_1_pad_type_0 = const()[name = tensor<string, []>("current_value_1_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_value_1_strides_0 = const()[name = tensor<string, []>("current_value_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_1_pad_0 = const()[name = tensor<string, []>("current_value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_1_dilations_0 = const()[name = tensor<string, []>("current_value_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_value_1_groups_0 = const()[name = tensor<string, []>("current_value_1_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(140490880)))]; |
|
tensor<fp16, [1280]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(143767744)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = current_value_1_dilations_0, groups = current_value_1_groups_0, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = current_value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("current_value_1_cast_fp16")]; |
|
tensor<int32, [1]> var_117_axes_0 = const()[name = tensor<string, []>("op_117_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1, 448]> var_117_cast_fp16 = expand_dims(axes = var_117_axes_0, x = kv_cache_update_mask)[name = tensor<string, []>("op_117_cast_fp16")]; |
|
tensor<int32, [1]> var_118_axes_0 = const()[name = tensor<string, []>("op_118_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1, 1, 448]> var_118_cast_fp16 = expand_dims(axes = var_118_axes_0, x = var_117_cast_fp16)[name = tensor<string, []>("op_118_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_120_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_120_cast_fp16")]; |
|
tensor<fp16, []> var_57_to_fp16 = const()[name = tensor<string, []>("op_57_to_fp16"), val = tensor<fp16, []>(0x1p+0)]; |
|
tensor<fp16, [1, 1, 1, 448]> var_121_cast_fp16 = sub(x = var_57_to_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_122_cast_fp16 = mul(x = var_43_cast_fp16_0, y = var_121_cast_fp16)[name = tensor<string, []>("op_122_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> key_1_cast_fp16 = add(x = var_120_cast_fp16, y = var_122_cast_fp16)[name = tensor<string, []>("key_1_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_124_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_124_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_126_cast_fp16 = mul(x = var_48_cast_fp16_0, y = var_121_cast_fp16)[name = tensor<string, []>("op_126_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> value_1_cast_fp16 = add(x = var_124_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("value_1_cast_fp16")]; |
|
tensor<int32, [4]> var_129 = const()[name = tensor<string, []>("op_129"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 1]> mh_q_1_cast_fp16 = reshape(shape = var_129, x = query_1_cast_fp16)[name = tensor<string, []>("mh_q_1_cast_fp16")]; |
|
tensor<fp16, []> var_131_to_fp16 = const()[name = tensor<string, []>("op_131_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 20, 64, 1]> var_132_cast_fp16 = mul(x = mh_q_1_cast_fp16, y = var_131_to_fp16)[name = tensor<string, []>("op_132_cast_fp16")]; |
|
tensor<int32, [4]> var_133 = const()[name = tensor<string, []>("op_133"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 448]> var_134_cast_fp16 = reshape(shape = var_133, x = key_1_cast_fp16)[name = tensor<string, []>("op_134_cast_fp16")]; |
|
tensor<bool, []> mh_w_1_transpose_x_0 = const()[name = tensor<string, []>("mh_w_1_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_1_transpose_y_0 = const()[name = tensor<string, []>("mh_w_1_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 20, 1, 448]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_132_cast_fp16, y = var_134_cast_fp16)[name = tensor<string, []>("mh_w_1_cast_fp16")]; |
|
tensor<int32, [1]> var_138_axes_0 = const()[name = tensor<string, []>("op_138_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1, 448]> var_138_cast_fp16 = expand_dims(axes = var_138_axes_0, x = decoder_key_padding_mask)[name = tensor<string, []>("op_138_cast_fp16")]; |
|
tensor<int32, [1]> var_139_axes_0 = const()[name = tensor<string, []>("op_139_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1, 1, 448]> var_139_cast_fp16 = expand_dims(axes = var_139_axes_0, x = var_138_cast_fp16)[name = tensor<string, []>("op_139_cast_fp16")]; |
|
tensor<fp16, [1, 20, 1, 448]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_139_cast_fp16)[name = tensor<string, []>("mh_w_3_cast_fp16")]; |
|
tensor<fp16, [1, 20, 1, 448]> var_142_cast_fp16 = softmax(axis = var_56, x = mh_w_3_cast_fp16)[name = tensor<string, []>("op_142_cast_fp16")]; |
|
tensor<int32, [4]> var_143 = const()[name = tensor<string, []>("op_143"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 448]> var_144_cast_fp16 = reshape(shape = var_143, x = value_1_cast_fp16)[name = tensor<string, []>("op_144_cast_fp16")]; |
|
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 20, 64, 1]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_144_cast_fp16, y = var_142_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")]; |
|
tensor<int32, [4]> var_147 = const()[name = tensor<string, []>("op_147"), val = tensor<int32, [4]>([1, 1280, 1, -1])]; |
|
tensor<fp16, [1, 1280, 1, 1]> input_1_cast_fp16 = reshape(shape = var_147, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")]; |
|
tensor<string, []> obj_7_pad_type_0 = const()[name = tensor<string, []>("obj_7_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_7_strides_0 = const()[name = tensor<string, []>("obj_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_7_pad_0 = const()[name = tensor<string, []>("obj_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_7_dilations_0 = const()[name = tensor<string, []>("obj_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_7_groups_0 = const()[name = tensor<string, []>("obj_7_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(143770368)))]; |
|
tensor<fp16, [1280]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147047232)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> obj_7_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_7_dilations_0, groups = obj_7_groups_0, pad = obj_7_pad_0, pad_type = obj_7_pad_type_0, strides = obj_7_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("obj_7_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 1]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_7_cast_fp16)[name = tensor<string, []>("inputs_3_cast_fp16")]; |
|
tensor<int32, [1]> out_3_axes_0 = const()[name = tensor<string, []>("out_3_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_169_to_fp16 = const()[name = tensor<string, []>("op_169_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> out_3_cast_fp16 = layer_norm(axes = out_3_axes_0, epsilon = var_169_to_fp16, x = inputs_3_cast_fp16)[name = tensor<string, []>("out_3_cast_fp16")]; |
|
tensor<fp16, [1280]> obj_9_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_9_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147049856)))]; |
|
tensor<fp16, [1280]> obj_9_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_9_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147052480)))]; |
|
tensor<fp16, []> obj_9_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_9_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> obj_9_cast_fp16 = batch_norm(beta = obj_9_beta_0_to_fp16, epsilon = obj_9_epsilon_0_to_fp16, gamma = obj_9_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_3_cast_fp16)[name = tensor<string, []>("obj_9_cast_fp16")]; |
|
tensor<string, []> query_3_pad_type_0 = const()[name = tensor<string, []>("query_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_3_strides_0 = const()[name = tensor<string, []>("query_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_3_pad_0 = const()[name = tensor<string, []>("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_3_dilations_0 = const()[name = tensor<string, []>("query_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_3_groups_0 = const()[name = tensor<string, []>("query_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147055104)))]; |
|
tensor<fp16, [1280]> layers_0_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(150331968)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor<string, []>("query_3_cast_fp16")]; |
|
tensor<string, []> key_3_pad_type_0 = const()[name = tensor<string, []>("key_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> key_3_strides_0 = const()[name = tensor<string, []>("key_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> key_3_pad_0 = const()[name = tensor<string, []>("key_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> key_3_dilations_0 = const()[name = tensor<string, []>("key_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> key_3_groups_0 = const()[name = tensor<string, []>("key_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(150334592)))]; |
|
tensor<fp16, [1, 1280, 1, 1500]> key_3_cast_fp16 = conv(dilations = key_3_dilations_0, groups = key_3_groups_0, pad = key_3_pad_0, pad_type = key_3_pad_type_0, strides = key_3_strides_0, weight = layers_0_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("key_3_cast_fp16")]; |
|
tensor<string, []> value_3_pad_type_0 = const()[name = tensor<string, []>("value_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> value_3_strides_0 = const()[name = tensor<string, []>("value_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> value_3_pad_0 = const()[name = tensor<string, []>("value_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> value_3_dilations_0 = const()[name = tensor<string, []>("value_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> value_3_groups_0 = const()[name = tensor<string, []>("value_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(153611456)))]; |
|
tensor<fp16, [1280]> layers_0_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(156888320)))]; |
|
tensor<fp16, [1, 1280, 1, 1500]> value_3_cast_fp16 = conv(bias = layers_0_encoder_attn_v_proj_bias_to_fp16, dilations = value_3_dilations_0, groups = value_3_groups_0, pad = value_3_pad_0, pad_type = value_3_pad_type_0, strides = value_3_strides_0, weight = layers_0_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_3_cast_fp16")]; |
|
tensor<int32, [4]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 1]> mh_q_3_cast_fp16 = reshape(shape = var_204, x = query_3_cast_fp16)[name = tensor<string, []>("mh_q_3_cast_fp16")]; |
|
tensor<fp16, []> var_206_to_fp16 = const()[name = tensor<string, []>("op_206_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 20, 64, 1]> var_207_cast_fp16 = mul(x = mh_q_3_cast_fp16, y = var_206_to_fp16)[name = tensor<string, []>("op_207_cast_fp16")]; |
|
tensor<int32, [4]> var_208 = const()[name = tensor<string, []>("op_208"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 1500]> var_209_cast_fp16 = reshape(shape = var_208, x = key_3_cast_fp16)[name = tensor<string, []>("op_209_cast_fp16")]; |
|
tensor<bool, []> mh_w_5_transpose_x_0 = const()[name = tensor<string, []>("mh_w_5_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_5_transpose_y_0 = const()[name = tensor<string, []>("mh_w_5_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 20, 1, 1500]> mh_w_5_cast_fp16 = matmul(transpose_x = mh_w_5_transpose_x_0, transpose_y = mh_w_5_transpose_y_0, x = var_207_cast_fp16, y = var_209_cast_fp16)[name = tensor<string, []>("mh_w_5_cast_fp16")]; |
|
tensor<fp16, [1, 20, 1, 1500]> obj_13_cast_fp16 = softmax(axis = var_56, x = mh_w_5_cast_fp16)[name = tensor<string, []>("obj_13_cast_fp16")]; |
|
tensor<int32, [4]> var_213 = const()[name = tensor<string, []>("op_213"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 1500]> var_214_cast_fp16 = reshape(shape = var_213, x = value_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")]; |
|
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 20, 64, 1]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_214_cast_fp16, y = obj_13_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")]; |
|
tensor<int32, [4]> var_217 = const()[name = tensor<string, []>("op_217"), val = tensor<int32, [4]>([1, 1280, 1, -1])]; |
|
tensor<fp16, [1, 1280, 1, 1]> input_3_cast_fp16 = reshape(shape = var_217, x = attn_3_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")]; |
|
tensor<string, []> obj_11_pad_type_0 = const()[name = tensor<string, []>("obj_11_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_11_strides_0 = const()[name = tensor<string, []>("obj_11_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_11_pad_0 = const()[name = tensor<string, []>("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_11_dilations_0 = const()[name = tensor<string, []>("obj_11_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_11_groups_0 = const()[name = tensor<string, []>("obj_11_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(156890944)))]; |
|
tensor<fp16, [1280]> layers_0_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160167808)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> obj_11_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("obj_11_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 1]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = obj_11_cast_fp16)[name = tensor<string, []>("inputs_5_cast_fp16")]; |
|
tensor<int32, [1]> out_5_axes_0 = const()[name = tensor<string, []>("out_5_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_235_to_fp16 = const()[name = tensor<string, []>("op_235_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> out_5_cast_fp16 = layer_norm(axes = out_5_axes_0, epsilon = var_235_to_fp16, x = inputs_5_cast_fp16)[name = tensor<string, []>("out_5_cast_fp16")]; |
|
tensor<fp16, [1280]> input_5_gamma_0_to_fp16 = const()[name = tensor<string, []>("input_5_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160170432)))]; |
|
tensor<fp16, [1280]> input_5_beta_0_to_fp16 = const()[name = tensor<string, []>("input_5_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160173056)))]; |
|
tensor<fp16, []> input_5_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_5_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> input_5_cast_fp16 = batch_norm(beta = input_5_beta_0_to_fp16, epsilon = input_5_epsilon_0_to_fp16, gamma = input_5_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_5_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")]; |
|
tensor<string, []> input_7_pad_type_0 = const()[name = tensor<string, []>("input_7_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> input_7_strides_0 = const()[name = tensor<string, []>("input_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_7_pad_0 = const()[name = tensor<string, []>("input_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_7_dilations_0 = const()[name = tensor<string, []>("input_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> input_7_groups_0 = const()[name = tensor<string, []>("input_7_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [5120, 1280, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [5120, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160175680)))]; |
|
tensor<fp16, [5120]> layers_0_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [5120]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(173282944)))]; |
|
tensor<fp16, [1, 5120, 1, 1]> input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")]; |
|
tensor<string, []> input_9_mode_0 = const()[name = tensor<string, []>("input_9_mode_0"), val = tensor<string, []>("EXACT")]; |
|
tensor<fp16, [1, 5120, 1, 1]> input_9_cast_fp16 = gelu(mode = input_9_mode_0, x = input_7_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")]; |
|
tensor<string, []> hidden_states_3_pad_type_0 = const()[name = tensor<string, []>("hidden_states_3_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> hidden_states_3_strides_0 = const()[name = tensor<string, []>("hidden_states_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_3_pad_0 = const()[name = tensor<string, []>("hidden_states_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_3_dilations_0 = const()[name = tensor<string, []>("hidden_states_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> hidden_states_3_groups_0 = const()[name = tensor<string, []>("hidden_states_3_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 5120, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [1280, 5120, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(173293248)))]; |
|
tensor<fp16, [1280]> layers_0_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(186400512)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_3_dilations_0, groups = hidden_states_3_groups_0, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = hidden_states_3_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("hidden_states_3_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 1]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = hidden_states_3_cast_fp16)[name = tensor<string, []>("inputs_7_cast_fp16")]; |
|
tensor<int32, []> var_270 = const()[name = tensor<string, []>("op_270"), val = tensor<int32, []>(3)]; |
|
tensor<int32, [1]> out_7_axes_0 = const()[name = tensor<string, []>("out_7_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_296_to_fp16 = const()[name = tensor<string, []>("op_296_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> out_7_cast_fp16 = layer_norm(axes = out_7_axes_0, epsilon = var_296_to_fp16, x = inputs_7_cast_fp16)[name = tensor<string, []>("out_7_cast_fp16")]; |
|
tensor<fp16, [1280]> obj_15_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_15_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(186403136)))]; |
|
tensor<fp16, [1280]> obj_15_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_15_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(186405760)))]; |
|
tensor<fp16, []> obj_15_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_15_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> obj_15_cast_fp16 = batch_norm(beta = obj_15_beta_0_to_fp16, epsilon = obj_15_epsilon_0_to_fp16, gamma = obj_15_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_7_cast_fp16)[name = tensor<string, []>("obj_15_cast_fp16")]; |
|
tensor<string, []> query_5_pad_type_0 = const()[name = tensor<string, []>("query_5_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_5_strides_0 = const()[name = tensor<string, []>("query_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_5_pad_0 = const()[name = tensor<string, []>("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_5_dilations_0 = const()[name = tensor<string, []>("query_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_5_groups_0 = const()[name = tensor<string, []>("query_5_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(186408384)))]; |
|
tensor<fp16, [1280]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(189685248)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("query_5_cast_fp16")]; |
|
tensor<string, []> current_key_pad_type_0 = const()[name = tensor<string, []>("current_key_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_key_strides_0 = const()[name = tensor<string, []>("current_key_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_pad_0 = const()[name = tensor<string, []>("current_key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_dilations_0 = const()[name = tensor<string, []>("current_key_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_key_groups_0 = const()[name = tensor<string, []>("current_key_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(189687872)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> current_key_cast_fp16 = conv(dilations = current_key_dilations_0, groups = current_key_groups_0, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = current_key_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("current_key_cast_fp16")]; |
|
tensor<string, []> current_value_pad_type_0 = const()[name = tensor<string, []>("current_value_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> current_value_strides_0 = const()[name = tensor<string, []>("current_value_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_pad_0 = const()[name = tensor<string, []>("current_value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_dilations_0 = const()[name = tensor<string, []>("current_value_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> current_value_groups_0 = const()[name = tensor<string, []>("current_value_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(192964736)))]; |
|
tensor<fp16, [1280]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(196241600)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> current_value_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = current_value_dilations_0, groups = current_value_groups_0, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = current_value_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("current_value_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_334_cast_fp16 = mul(x = current_key_cast_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_334_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_336_cast_fp16 = mul(x = var_43_cast_fp16_1, y = var_121_cast_fp16)[name = tensor<string, []>("op_336_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> key_5_cast_fp16 = add(x = var_334_cast_fp16, y = var_336_cast_fp16)[name = tensor<string, []>("key_5_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_338_cast_fp16 = mul(x = current_value_cast_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_338_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> var_340_cast_fp16 = mul(x = var_48_cast_fp16_1, y = var_121_cast_fp16)[name = tensor<string, []>("op_340_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 448]> value_5_cast_fp16 = add(x = var_338_cast_fp16, y = var_340_cast_fp16)[name = tensor<string, []>("value_5_cast_fp16")]; |
|
tensor<int32, [4]> var_343 = const()[name = tensor<string, []>("op_343"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 1]> mh_q_5_cast_fp16 = reshape(shape = var_343, x = query_5_cast_fp16)[name = tensor<string, []>("mh_q_5_cast_fp16")]; |
|
tensor<fp16, []> var_345_to_fp16 = const()[name = tensor<string, []>("op_345_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 20, 64, 1]> var_346_cast_fp16 = mul(x = mh_q_5_cast_fp16, y = var_345_to_fp16)[name = tensor<string, []>("op_346_cast_fp16")]; |
|
tensor<int32, [4]> var_347 = const()[name = tensor<string, []>("op_347"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 448]> var_348_cast_fp16 = reshape(shape = var_347, x = key_5_cast_fp16)[name = tensor<string, []>("op_348_cast_fp16")]; |
|
tensor<bool, []> mh_w_7_transpose_x_0 = const()[name = tensor<string, []>("mh_w_7_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_7_transpose_y_0 = const()[name = tensor<string, []>("mh_w_7_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 20, 1, 448]> mh_w_7_cast_fp16 = matmul(transpose_x = mh_w_7_transpose_x_0, transpose_y = mh_w_7_transpose_y_0, x = var_346_cast_fp16, y = var_348_cast_fp16)[name = tensor<string, []>("mh_w_7_cast_fp16")]; |
|
tensor<fp16, [1, 20, 1, 448]> mh_w_9_cast_fp16 = add(x = mh_w_7_cast_fp16, y = var_139_cast_fp16)[name = tensor<string, []>("mh_w_9_cast_fp16")]; |
|
tensor<fp16, [1, 20, 1, 448]> var_356_cast_fp16 = softmax(axis = var_270, x = mh_w_9_cast_fp16)[name = tensor<string, []>("op_356_cast_fp16")]; |
|
tensor<int32, [4]> var_357 = const()[name = tensor<string, []>("op_357"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 448]> var_358_cast_fp16 = reshape(shape = var_357, x = value_5_cast_fp16)[name = tensor<string, []>("op_358_cast_fp16")]; |
|
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 20, 64, 1]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_358_cast_fp16, y = var_356_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")]; |
|
tensor<int32, [4]> var_361 = const()[name = tensor<string, []>("op_361"), val = tensor<int32, [4]>([1, 1280, 1, -1])]; |
|
tensor<fp16, [1, 1280, 1, 1]> input_11_cast_fp16 = reshape(shape = var_361, x = attn_5_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")]; |
|
tensor<string, []> obj_21_pad_type_0 = const()[name = tensor<string, []>("obj_21_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_21_strides_0 = const()[name = tensor<string, []>("obj_21_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_21_pad_0 = const()[name = tensor<string, []>("obj_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_21_dilations_0 = const()[name = tensor<string, []>("obj_21_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_21_groups_0 = const()[name = tensor<string, []>("obj_21_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(196244224)))]; |
|
tensor<fp16, [1280]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(199521088)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> obj_21_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_21_dilations_0, groups = obj_21_groups_0, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = obj_21_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("obj_21_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 1]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = obj_21_cast_fp16)[name = tensor<string, []>("inputs_9_cast_fp16")]; |
|
tensor<int32, [1]> out_9_axes_0 = const()[name = tensor<string, []>("out_9_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_383_to_fp16 = const()[name = tensor<string, []>("op_383_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> out_9_cast_fp16 = layer_norm(axes = out_9_axes_0, epsilon = var_383_to_fp16, x = inputs_9_cast_fp16)[name = tensor<string, []>("out_9_cast_fp16")]; |
|
tensor<fp16, [1280]> obj_23_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_23_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(199523712)))]; |
|
tensor<fp16, [1280]> obj_23_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_23_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(199526336)))]; |
|
tensor<fp16, []> obj_23_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_23_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> obj_23_cast_fp16 = batch_norm(beta = obj_23_beta_0_to_fp16, epsilon = obj_23_epsilon_0_to_fp16, gamma = obj_23_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_9_cast_fp16)[name = tensor<string, []>("obj_23_cast_fp16")]; |
|
tensor<string, []> query_pad_type_0 = const()[name = tensor<string, []>("query_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> query_strides_0 = const()[name = tensor<string, []>("query_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_pad_0 = const()[name = tensor<string, []>("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_dilations_0 = const()[name = tensor<string, []>("query_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> query_groups_0 = const()[name = tensor<string, []>("query_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(199528960)))]; |
|
tensor<fp16, [1280]> layers_1_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202805824)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> query_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_23_cast_fp16)[name = tensor<string, []>("query_cast_fp16")]; |
|
tensor<string, []> key_pad_type_0 = const()[name = tensor<string, []>("key_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> key_strides_0 = const()[name = tensor<string, []>("key_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> key_pad_0 = const()[name = tensor<string, []>("key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> key_dilations_0 = const()[name = tensor<string, []>("key_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> key_groups_0 = const()[name = tensor<string, []>("key_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202808448)))]; |
|
tensor<fp16, [1, 1280, 1, 1500]> key_cast_fp16 = conv(dilations = key_dilations_0, groups = key_groups_0, pad = key_pad_0, pad_type = key_pad_type_0, strides = key_strides_0, weight = layers_1_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("key_cast_fp16")]; |
|
tensor<string, []> value_pad_type_0 = const()[name = tensor<string, []>("value_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> value_strides_0 = const()[name = tensor<string, []>("value_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> value_pad_0 = const()[name = tensor<string, []>("value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> value_dilations_0 = const()[name = tensor<string, []>("value_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> value_groups_0 = const()[name = tensor<string, []>("value_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(206085312)))]; |
|
tensor<fp16, [1280]> layers_1_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(209362176)))]; |
|
tensor<fp16, [1, 1280, 1, 1500]> value_cast_fp16 = conv(bias = layers_1_encoder_attn_v_proj_bias_to_fp16, dilations = value_dilations_0, groups = value_groups_0, pad = value_pad_0, pad_type = value_pad_type_0, strides = value_strides_0, weight = layers_1_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_cast_fp16")]; |
|
tensor<int32, [4]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 1]> mh_q_cast_fp16 = reshape(shape = var_418, x = query_cast_fp16)[name = tensor<string, []>("mh_q_cast_fp16")]; |
|
tensor<fp16, []> var_420_to_fp16 = const()[name = tensor<string, []>("op_420_to_fp16"), val = tensor<fp16, []>(0x1p-3)]; |
|
tensor<fp16, [1, 20, 64, 1]> var_421_cast_fp16 = mul(x = mh_q_cast_fp16, y = var_420_to_fp16)[name = tensor<string, []>("op_421_cast_fp16")]; |
|
tensor<int32, [4]> var_422 = const()[name = tensor<string, []>("op_422"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 1500]> var_423_cast_fp16 = reshape(shape = var_422, x = key_cast_fp16)[name = tensor<string, []>("op_423_cast_fp16")]; |
|
tensor<bool, []> mh_w_transpose_x_0 = const()[name = tensor<string, []>("mh_w_transpose_x_0"), val = tensor<bool, []>(true)]; |
|
tensor<bool, []> mh_w_transpose_y_0 = const()[name = tensor<string, []>("mh_w_transpose_y_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 20, 1, 1500]> mh_w_cast_fp16 = matmul(transpose_x = mh_w_transpose_x_0, transpose_y = mh_w_transpose_y_0, x = var_421_cast_fp16, y = var_423_cast_fp16)[name = tensor<string, []>("mh_w_cast_fp16")]; |
|
tensor<fp16, [1, 20, 1, 1500]> obj_27_cast_fp16 = softmax(axis = var_270, x = mh_w_cast_fp16)[name = tensor<string, []>("obj_27_cast_fp16")]; |
|
tensor<int32, [4]> var_427 = const()[name = tensor<string, []>("op_427"), val = tensor<int32, [4]>([1, 20, 64, -1])]; |
|
tensor<fp16, [1, 20, 64, 1500]> var_428_cast_fp16 = reshape(shape = var_427, x = value_cast_fp16)[name = tensor<string, []>("op_428_cast_fp16")]; |
|
tensor<bool, []> attn_transpose_x_0 = const()[name = tensor<string, []>("attn_transpose_x_0"), val = tensor<bool, []>(false)]; |
|
tensor<bool, []> attn_transpose_y_0 = const()[name = tensor<string, []>("attn_transpose_y_0"), val = tensor<bool, []>(true)]; |
|
tensor<fp16, [1, 20, 64, 1]> attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_428_cast_fp16, y = obj_27_cast_fp16)[name = tensor<string, []>("attn_cast_fp16")]; |
|
tensor<int32, [4]> var_431 = const()[name = tensor<string, []>("op_431"), val = tensor<int32, [4]>([1, 1280, 1, -1])]; |
|
tensor<fp16, [1, 1280, 1, 1]> input_13_cast_fp16 = reshape(shape = var_431, x = attn_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")]; |
|
tensor<string, []> obj_25_pad_type_0 = const()[name = tensor<string, []>("obj_25_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> obj_25_strides_0 = const()[name = tensor<string, []>("obj_25_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_25_pad_0 = const()[name = tensor<string, []>("obj_25_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_25_dilations_0 = const()[name = tensor<string, []>("obj_25_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> obj_25_groups_0 = const()[name = tensor<string, []>("obj_25_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(209364800)))]; |
|
tensor<fp16, [1280]> layers_1_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(212641664)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> obj_25_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = obj_25_dilations_0, groups = obj_25_groups_0, pad = obj_25_pad_0, pad_type = obj_25_pad_type_0, strides = obj_25_strides_0, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = tensor<string, []>("obj_25_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 1]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_25_cast_fp16)[name = tensor<string, []>("inputs_11_cast_fp16")]; |
|
tensor<int32, [1]> out_11_axes_0 = const()[name = tensor<string, []>("out_11_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_452_to_fp16 = const()[name = tensor<string, []>("op_452_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> out_11_cast_fp16 = layer_norm(axes = out_11_axes_0, epsilon = var_452_to_fp16, x = inputs_11_cast_fp16)[name = tensor<string, []>("out_11_cast_fp16")]; |
|
tensor<fp16, [1280]> input_15_gamma_0_to_fp16 = const()[name = tensor<string, []>("input_15_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(212644288)))]; |
|
tensor<fp16, [1280]> input_15_beta_0_to_fp16 = const()[name = tensor<string, []>("input_15_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(212646912)))]; |
|
tensor<fp16, []> input_15_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_15_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> input_15_cast_fp16 = batch_norm(beta = input_15_beta_0_to_fp16, epsilon = input_15_epsilon_0_to_fp16, gamma = input_15_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_11_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")]; |
|
tensor<string, []> input_17_pad_type_0 = const()[name = tensor<string, []>("input_17_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> input_17_strides_0 = const()[name = tensor<string, []>("input_17_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_17_pad_0 = const()[name = tensor<string, []>("input_17_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_17_dilations_0 = const()[name = tensor<string, []>("input_17_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> input_17_groups_0 = const()[name = tensor<string, []>("input_17_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [5120, 1280, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [5120, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(212649536)))]; |
|
tensor<fp16, [5120]> layers_1_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [5120]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(225756800)))]; |
|
tensor<fp16, [1, 5120, 1, 1]> input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")]; |
|
tensor<string, []> input_mode_0 = const()[name = tensor<string, []>("input_mode_0"), val = tensor<string, []>("EXACT")]; |
|
tensor<fp16, [1, 5120, 1, 1]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_17_cast_fp16)[name = tensor<string, []>("input_cast_fp16")]; |
|
tensor<string, []> hidden_states_5_pad_type_0 = const()[name = tensor<string, []>("hidden_states_5_pad_type_0"), val = tensor<string, []>("valid")]; |
|
tensor<int32, [2]> hidden_states_5_strides_0 = const()[name = tensor<string, []>("hidden_states_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = tensor<string, []>("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_5_dilations_0 = const()[name = tensor<string, []>("hidden_states_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, []> hidden_states_5_groups_0 = const()[name = tensor<string, []>("hidden_states_5_groups_0"), val = tensor<int32, []>(1)]; |
|
tensor<fp16, [1280, 5120, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [1280, 5120, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(225767104)))]; |
|
tensor<fp16, [1280]> layers_1_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(238874368)))]; |
|
tensor<fp16, [1, 1280, 1, 1]> hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_cast_fp16)[name = tensor<string, []>("hidden_states_5_cast_fp16")]; |
|
tensor<fp16, [1, 1280, 1, 1]> inputs_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_5_cast_fp16)[name = tensor<string, []>("inputs_cast_fp16")]; |
|
tensor<int32, [1]> out_axes_0 = const()[name = tensor<string, []>("out_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, []> var_495_to_fp16 = const()[name = tensor<string, []>("op_495_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> out_cast_fp16 = layer_norm(axes = out_axes_0, epsilon = var_495_to_fp16, x = inputs_cast_fp16)[name = tensor<string, []>("out_cast_fp16")]; |
|
tensor<fp16, [1280]> hidden_states_gamma_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(238876992)))]; |
|
tensor<fp16, [1280]> hidden_states_beta_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(238879616)))]; |
|
tensor<fp16, []> hidden_states_epsilon_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)]; |
|
tensor<fp16, [1, 1280, 1, 1]> hidden_states_cast_fp16 = batch_norm(beta = hidden_states_beta_0_to_fp16, epsilon = hidden_states_epsilon_0_to_fp16, gamma = hidden_states_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_cast_fp16)[name = tensor<string, []>("hidden_states_cast_fp16")]; |
|
tensor<int32, [1]> var_506_axes_0 = const()[name = tensor<string, []>("op_506_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1280, 1]> var_506_cast_fp16 = squeeze(axes = var_506_axes_0, x = hidden_states_cast_fp16)[name = tensor<string, []>("op_506_cast_fp16")]; |
|
tensor<int32, [3]> var_509_perm_0 = const()[name = tensor<string, []>("op_509_perm_0"), val = tensor<int32, [3]>([0, 2, 1])]; |
|
tensor<fp16, [51866]> linear_0_bias_0_to_fp16 = const()[name = tensor<string, []>("linear_0_bias_0_to_fp16"), val = tensor<fp16, [51866]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(238882240)))]; |
|
tensor<fp16, [1, 1, 1280]> var_509_cast_fp16 = transpose(perm = var_509_perm_0, x = var_506_cast_fp16)[name = tensor<string, []>("transpose_0")]; |
|
tensor<fp16, [1, 1, 51866]> logits = linear(bias = linear_0_bias_0_to_fp16, weight = embed_tokens_weight_to_fp16, x = var_509_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")]; |
|
tensor<int32, []> var_513 = const()[name = tensor<string, []>("op_513"), val = tensor<int32, []>(1)]; |
|
tensor<bool, []> obj_31_interleave_0 = const()[name = tensor<string, []>("obj_31_interleave_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 2560, 1, 1]> key_cache_updates = concat(axis = var_513, interleave = obj_31_interleave_0, values = (current_key_1_cast_fp16, current_key_cast_fp16))[name = tensor<string, []>("obj_31_cast_fp16")]; |
|
tensor<int32, []> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, []>(1)]; |
|
tensor<bool, []> obj_33_interleave_0 = const()[name = tensor<string, []>("obj_33_interleave_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 2560, 1, 1]> value_cache_updates = concat(axis = var_516, interleave = obj_33_interleave_0, values = (current_value_1_cast_fp16, current_value_cast_fp16))[name = tensor<string, []>("obj_33_cast_fp16")]; |
|
tensor<int32, [4]> var_527_begin_0 = const()[name = tensor<string, []>("op_527_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_527_end_0 = const()[name = tensor<string, []>("op_527_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_527_end_mask_0 = const()[name = tensor<string, []>("op_527_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_527_cast_fp16 = slice_by_index(begin = var_527_begin_0, end = var_527_end_0, end_mask = var_527_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_527_cast_fp16")]; |
|
tensor<int32, [4]> var_530_begin_0 = const()[name = tensor<string, []>("op_530_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_530_end_0 = const()[name = tensor<string, []>("op_530_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_530_end_mask_0 = const()[name = tensor<string, []>("op_530_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_530_squeeze_mask_0 = const()[name = tensor<string, []>("op_530_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_530_cast_fp16 = slice_by_index(begin = var_530_begin_0, end = var_530_end_0, end_mask = var_530_end_mask_0, squeeze_mask = var_530_squeeze_mask_0, x = var_527_cast_fp16)[name = tensor<string, []>("op_530_cast_fp16")]; |
|
tensor<int32, [4]> var_545_begin_0 = const()[name = tensor<string, []>("op_545_begin_0"), val = tensor<int32, [4]>([0, 1, 0, 0])]; |
|
tensor<int32, [4]> var_545_end_0 = const()[name = tensor<string, []>("op_545_end_0"), val = tensor<int32, [4]>([1, 2, 1, 1500])]; |
|
tensor<bool, [4]> var_545_end_mask_0 = const()[name = tensor<string, []>("op_545_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_545_cast_fp16 = slice_by_index(begin = var_545_begin_0, end = var_545_end_0, end_mask = var_545_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_545_cast_fp16")]; |
|
tensor<int32, [4]> var_548_begin_0 = const()[name = tensor<string, []>("op_548_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_548_end_0 = const()[name = tensor<string, []>("op_548_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_548_end_mask_0 = const()[name = tensor<string, []>("op_548_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_548_squeeze_mask_0 = const()[name = tensor<string, []>("op_548_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_548_cast_fp16 = slice_by_index(begin = var_548_begin_0, end = var_548_end_0, end_mask = var_548_end_mask_0, squeeze_mask = var_548_squeeze_mask_0, x = var_545_cast_fp16)[name = tensor<string, []>("op_548_cast_fp16")]; |
|
tensor<int32, [4]> var_563_begin_0 = const()[name = tensor<string, []>("op_563_begin_0"), val = tensor<int32, [4]>([0, 2, 0, 0])]; |
|
tensor<int32, [4]> var_563_end_0 = const()[name = tensor<string, []>("op_563_end_0"), val = tensor<int32, [4]>([1, 3, 1, 1500])]; |
|
tensor<bool, [4]> var_563_end_mask_0 = const()[name = tensor<string, []>("op_563_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_563_cast_fp16 = slice_by_index(begin = var_563_begin_0, end = var_563_end_0, end_mask = var_563_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_563_cast_fp16")]; |
|
tensor<int32, [4]> var_566_begin_0 = const()[name = tensor<string, []>("op_566_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_566_end_0 = const()[name = tensor<string, []>("op_566_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_566_end_mask_0 = const()[name = tensor<string, []>("op_566_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_566_squeeze_mask_0 = const()[name = tensor<string, []>("op_566_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_566_cast_fp16 = slice_by_index(begin = var_566_begin_0, end = var_566_end_0, end_mask = var_566_end_mask_0, squeeze_mask = var_566_squeeze_mask_0, x = var_563_cast_fp16)[name = tensor<string, []>("op_566_cast_fp16")]; |
|
tensor<int32, [4]> var_581_begin_0 = const()[name = tensor<string, []>("op_581_begin_0"), val = tensor<int32, [4]>([0, 3, 0, 0])]; |
|
tensor<int32, [4]> var_581_end_0 = const()[name = tensor<string, []>("op_581_end_0"), val = tensor<int32, [4]>([1, 4, 1, 1500])]; |
|
tensor<bool, [4]> var_581_end_mask_0 = const()[name = tensor<string, []>("op_581_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_581_cast_fp16 = slice_by_index(begin = var_581_begin_0, end = var_581_end_0, end_mask = var_581_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_581_cast_fp16")]; |
|
tensor<int32, [4]> var_584_begin_0 = const()[name = tensor<string, []>("op_584_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_584_end_0 = const()[name = tensor<string, []>("op_584_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_584_end_mask_0 = const()[name = tensor<string, []>("op_584_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_584_squeeze_mask_0 = const()[name = tensor<string, []>("op_584_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_584_cast_fp16 = slice_by_index(begin = var_584_begin_0, end = var_584_end_0, end_mask = var_584_end_mask_0, squeeze_mask = var_584_squeeze_mask_0, x = var_581_cast_fp16)[name = tensor<string, []>("op_584_cast_fp16")]; |
|
tensor<int32, [4]> var_599_begin_0 = const()[name = tensor<string, []>("op_599_begin_0"), val = tensor<int32, [4]>([0, 4, 0, 0])]; |
|
tensor<int32, [4]> var_599_end_0 = const()[name = tensor<string, []>("op_599_end_0"), val = tensor<int32, [4]>([1, 5, 1, 1500])]; |
|
tensor<bool, [4]> var_599_end_mask_0 = const()[name = tensor<string, []>("op_599_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_599_cast_fp16 = slice_by_index(begin = var_599_begin_0, end = var_599_end_0, end_mask = var_599_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_599_cast_fp16")]; |
|
tensor<int32, [4]> var_602_begin_0 = const()[name = tensor<string, []>("op_602_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_602_end_0 = const()[name = tensor<string, []>("op_602_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_602_end_mask_0 = const()[name = tensor<string, []>("op_602_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_602_squeeze_mask_0 = const()[name = tensor<string, []>("op_602_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_602_cast_fp16 = slice_by_index(begin = var_602_begin_0, end = var_602_end_0, end_mask = var_602_end_mask_0, squeeze_mask = var_602_squeeze_mask_0, x = var_599_cast_fp16)[name = tensor<string, []>("op_602_cast_fp16")]; |
|
tensor<int32, [4]> var_617_begin_0 = const()[name = tensor<string, []>("op_617_begin_0"), val = tensor<int32, [4]>([0, 5, 0, 0])]; |
|
tensor<int32, [4]> var_617_end_0 = const()[name = tensor<string, []>("op_617_end_0"), val = tensor<int32, [4]>([1, 6, 1, 1500])]; |
|
tensor<bool, [4]> var_617_end_mask_0 = const()[name = tensor<string, []>("op_617_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_617_cast_fp16 = slice_by_index(begin = var_617_begin_0, end = var_617_end_0, end_mask = var_617_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")]; |
|
tensor<int32, [4]> var_620_begin_0 = const()[name = tensor<string, []>("op_620_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_620_end_0 = const()[name = tensor<string, []>("op_620_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_620_end_mask_0 = const()[name = tensor<string, []>("op_620_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_620_squeeze_mask_0 = const()[name = tensor<string, []>("op_620_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_620_cast_fp16 = slice_by_index(begin = var_620_begin_0, end = var_620_end_0, end_mask = var_620_end_mask_0, squeeze_mask = var_620_squeeze_mask_0, x = var_617_cast_fp16)[name = tensor<string, []>("op_620_cast_fp16")]; |
|
tensor<int32, [4]> var_635_begin_0 = const()[name = tensor<string, []>("op_635_begin_0"), val = tensor<int32, [4]>([0, 6, 0, 0])]; |
|
tensor<int32, [4]> var_635_end_0 = const()[name = tensor<string, []>("op_635_end_0"), val = tensor<int32, [4]>([1, 7, 1, 1500])]; |
|
tensor<bool, [4]> var_635_end_mask_0 = const()[name = tensor<string, []>("op_635_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_635_cast_fp16 = slice_by_index(begin = var_635_begin_0, end = var_635_end_0, end_mask = var_635_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_635_cast_fp16")]; |
|
tensor<int32, [4]> var_638_begin_0 = const()[name = tensor<string, []>("op_638_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_638_end_0 = const()[name = tensor<string, []>("op_638_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_638_end_mask_0 = const()[name = tensor<string, []>("op_638_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_638_squeeze_mask_0 = const()[name = tensor<string, []>("op_638_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_638_cast_fp16 = slice_by_index(begin = var_638_begin_0, end = var_638_end_0, end_mask = var_638_end_mask_0, squeeze_mask = var_638_squeeze_mask_0, x = var_635_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")]; |
|
tensor<int32, [4]> var_653_begin_0 = const()[name = tensor<string, []>("op_653_begin_0"), val = tensor<int32, [4]>([0, 7, 0, 0])]; |
|
tensor<int32, [4]> var_653_end_0 = const()[name = tensor<string, []>("op_653_end_0"), val = tensor<int32, [4]>([1, 8, 1, 1500])]; |
|
tensor<bool, [4]> var_653_end_mask_0 = const()[name = tensor<string, []>("op_653_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_653_cast_fp16 = slice_by_index(begin = var_653_begin_0, end = var_653_end_0, end_mask = var_653_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_653_cast_fp16")]; |
|
tensor<int32, [4]> var_656_begin_0 = const()[name = tensor<string, []>("op_656_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_656_end_0 = const()[name = tensor<string, []>("op_656_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_656_end_mask_0 = const()[name = tensor<string, []>("op_656_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_656_squeeze_mask_0 = const()[name = tensor<string, []>("op_656_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_656_cast_fp16 = slice_by_index(begin = var_656_begin_0, end = var_656_end_0, end_mask = var_656_end_mask_0, squeeze_mask = var_656_squeeze_mask_0, x = var_653_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")]; |
|
tensor<int32, [4]> var_671_begin_0 = const()[name = tensor<string, []>("op_671_begin_0"), val = tensor<int32, [4]>([0, 8, 0, 0])]; |
|
tensor<int32, [4]> var_671_end_0 = const()[name = tensor<string, []>("op_671_end_0"), val = tensor<int32, [4]>([1, 9, 1, 1500])]; |
|
tensor<bool, [4]> var_671_end_mask_0 = const()[name = tensor<string, []>("op_671_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_671_cast_fp16 = slice_by_index(begin = var_671_begin_0, end = var_671_end_0, end_mask = var_671_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_671_cast_fp16")]; |
|
tensor<int32, [4]> var_674_begin_0 = const()[name = tensor<string, []>("op_674_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_674_end_0 = const()[name = tensor<string, []>("op_674_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_674_end_mask_0 = const()[name = tensor<string, []>("op_674_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_674_squeeze_mask_0 = const()[name = tensor<string, []>("op_674_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_674_cast_fp16 = slice_by_index(begin = var_674_begin_0, end = var_674_end_0, end_mask = var_674_end_mask_0, squeeze_mask = var_674_squeeze_mask_0, x = var_671_cast_fp16)[name = tensor<string, []>("op_674_cast_fp16")]; |
|
tensor<int32, [4]> var_689_begin_0 = const()[name = tensor<string, []>("op_689_begin_0"), val = tensor<int32, [4]>([0, 9, 0, 0])]; |
|
tensor<int32, [4]> var_689_end_0 = const()[name = tensor<string, []>("op_689_end_0"), val = tensor<int32, [4]>([1, 10, 1, 1500])]; |
|
tensor<bool, [4]> var_689_end_mask_0 = const()[name = tensor<string, []>("op_689_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_689_cast_fp16 = slice_by_index(begin = var_689_begin_0, end = var_689_end_0, end_mask = var_689_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_689_cast_fp16")]; |
|
tensor<int32, [4]> var_692_begin_0 = const()[name = tensor<string, []>("op_692_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_692_end_0 = const()[name = tensor<string, []>("op_692_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_692_end_mask_0 = const()[name = tensor<string, []>("op_692_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_692_squeeze_mask_0 = const()[name = tensor<string, []>("op_692_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_692_cast_fp16 = slice_by_index(begin = var_692_begin_0, end = var_692_end_0, end_mask = var_692_end_mask_0, squeeze_mask = var_692_squeeze_mask_0, x = var_689_cast_fp16)[name = tensor<string, []>("op_692_cast_fp16")]; |
|
tensor<int32, [4]> var_707_begin_0 = const()[name = tensor<string, []>("op_707_begin_0"), val = tensor<int32, [4]>([0, 10, 0, 0])]; |
|
tensor<int32, [4]> var_707_end_0 = const()[name = tensor<string, []>("op_707_end_0"), val = tensor<int32, [4]>([1, 11, 1, 1500])]; |
|
tensor<bool, [4]> var_707_end_mask_0 = const()[name = tensor<string, []>("op_707_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_707_cast_fp16 = slice_by_index(begin = var_707_begin_0, end = var_707_end_0, end_mask = var_707_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_707_cast_fp16")]; |
|
tensor<int32, [4]> var_710_begin_0 = const()[name = tensor<string, []>("op_710_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_710_end_0 = const()[name = tensor<string, []>("op_710_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_710_end_mask_0 = const()[name = tensor<string, []>("op_710_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_710_squeeze_mask_0 = const()[name = tensor<string, []>("op_710_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_710_cast_fp16 = slice_by_index(begin = var_710_begin_0, end = var_710_end_0, end_mask = var_710_end_mask_0, squeeze_mask = var_710_squeeze_mask_0, x = var_707_cast_fp16)[name = tensor<string, []>("op_710_cast_fp16")]; |
|
tensor<int32, [4]> var_725_begin_0 = const()[name = tensor<string, []>("op_725_begin_0"), val = tensor<int32, [4]>([0, 11, 0, 0])]; |
|
tensor<int32, [4]> var_725_end_0 = const()[name = tensor<string, []>("op_725_end_0"), val = tensor<int32, [4]>([1, 12, 1, 1500])]; |
|
tensor<bool, [4]> var_725_end_mask_0 = const()[name = tensor<string, []>("op_725_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_725_cast_fp16 = slice_by_index(begin = var_725_begin_0, end = var_725_end_0, end_mask = var_725_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_725_cast_fp16")]; |
|
tensor<int32, [4]> var_728_begin_0 = const()[name = tensor<string, []>("op_728_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_728_end_0 = const()[name = tensor<string, []>("op_728_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_728_end_mask_0 = const()[name = tensor<string, []>("op_728_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_728_squeeze_mask_0 = const()[name = tensor<string, []>("op_728_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_728_cast_fp16 = slice_by_index(begin = var_728_begin_0, end = var_728_end_0, end_mask = var_728_end_mask_0, squeeze_mask = var_728_squeeze_mask_0, x = var_725_cast_fp16)[name = tensor<string, []>("op_728_cast_fp16")]; |
|
tensor<int32, [4]> var_743_begin_0 = const()[name = tensor<string, []>("op_743_begin_0"), val = tensor<int32, [4]>([0, 12, 0, 0])]; |
|
tensor<int32, [4]> var_743_end_0 = const()[name = tensor<string, []>("op_743_end_0"), val = tensor<int32, [4]>([1, 13, 1, 1500])]; |
|
tensor<bool, [4]> var_743_end_mask_0 = const()[name = tensor<string, []>("op_743_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_743_cast_fp16 = slice_by_index(begin = var_743_begin_0, end = var_743_end_0, end_mask = var_743_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_743_cast_fp16")]; |
|
tensor<int32, [4]> var_746_begin_0 = const()[name = tensor<string, []>("op_746_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_746_end_0 = const()[name = tensor<string, []>("op_746_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_746_end_mask_0 = const()[name = tensor<string, []>("op_746_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_746_squeeze_mask_0 = const()[name = tensor<string, []>("op_746_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_746_cast_fp16 = slice_by_index(begin = var_746_begin_0, end = var_746_end_0, end_mask = var_746_end_mask_0, squeeze_mask = var_746_squeeze_mask_0, x = var_743_cast_fp16)[name = tensor<string, []>("op_746_cast_fp16")]; |
|
tensor<int32, [4]> var_761_begin_0 = const()[name = tensor<string, []>("op_761_begin_0"), val = tensor<int32, [4]>([0, 13, 0, 0])]; |
|
tensor<int32, [4]> var_761_end_0 = const()[name = tensor<string, []>("op_761_end_0"), val = tensor<int32, [4]>([1, 14, 1, 1500])]; |
|
tensor<bool, [4]> var_761_end_mask_0 = const()[name = tensor<string, []>("op_761_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_761_cast_fp16 = slice_by_index(begin = var_761_begin_0, end = var_761_end_0, end_mask = var_761_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_761_cast_fp16")]; |
|
tensor<int32, [4]> var_764_begin_0 = const()[name = tensor<string, []>("op_764_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_764_end_0 = const()[name = tensor<string, []>("op_764_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_764_end_mask_0 = const()[name = tensor<string, []>("op_764_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_764_squeeze_mask_0 = const()[name = tensor<string, []>("op_764_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_764_cast_fp16 = slice_by_index(begin = var_764_begin_0, end = var_764_end_0, end_mask = var_764_end_mask_0, squeeze_mask = var_764_squeeze_mask_0, x = var_761_cast_fp16)[name = tensor<string, []>("op_764_cast_fp16")]; |
|
tensor<int32, [4]> var_779_begin_0 = const()[name = tensor<string, []>("op_779_begin_0"), val = tensor<int32, [4]>([0, 14, 0, 0])]; |
|
tensor<int32, [4]> var_779_end_0 = const()[name = tensor<string, []>("op_779_end_0"), val = tensor<int32, [4]>([1, 15, 1, 1500])]; |
|
tensor<bool, [4]> var_779_end_mask_0 = const()[name = tensor<string, []>("op_779_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_779_cast_fp16 = slice_by_index(begin = var_779_begin_0, end = var_779_end_0, end_mask = var_779_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_779_cast_fp16")]; |
|
tensor<int32, [4]> var_782_begin_0 = const()[name = tensor<string, []>("op_782_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_782_end_0 = const()[name = tensor<string, []>("op_782_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_782_end_mask_0 = const()[name = tensor<string, []>("op_782_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_782_squeeze_mask_0 = const()[name = tensor<string, []>("op_782_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_782_cast_fp16 = slice_by_index(begin = var_782_begin_0, end = var_782_end_0, end_mask = var_782_end_mask_0, squeeze_mask = var_782_squeeze_mask_0, x = var_779_cast_fp16)[name = tensor<string, []>("op_782_cast_fp16")]; |
|
tensor<int32, [4]> var_797_begin_0 = const()[name = tensor<string, []>("op_797_begin_0"), val = tensor<int32, [4]>([0, 15, 0, 0])]; |
|
tensor<int32, [4]> var_797_end_0 = const()[name = tensor<string, []>("op_797_end_0"), val = tensor<int32, [4]>([1, 16, 1, 1500])]; |
|
tensor<bool, [4]> var_797_end_mask_0 = const()[name = tensor<string, []>("op_797_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_797_cast_fp16 = slice_by_index(begin = var_797_begin_0, end = var_797_end_0, end_mask = var_797_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_797_cast_fp16")]; |
|
tensor<int32, [4]> var_800_begin_0 = const()[name = tensor<string, []>("op_800_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_800_end_0 = const()[name = tensor<string, []>("op_800_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_800_end_mask_0 = const()[name = tensor<string, []>("op_800_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_800_squeeze_mask_0 = const()[name = tensor<string, []>("op_800_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_800_cast_fp16 = slice_by_index(begin = var_800_begin_0, end = var_800_end_0, end_mask = var_800_end_mask_0, squeeze_mask = var_800_squeeze_mask_0, x = var_797_cast_fp16)[name = tensor<string, []>("op_800_cast_fp16")]; |
|
tensor<int32, [4]> var_815_begin_0 = const()[name = tensor<string, []>("op_815_begin_0"), val = tensor<int32, [4]>([0, 16, 0, 0])]; |
|
tensor<int32, [4]> var_815_end_0 = const()[name = tensor<string, []>("op_815_end_0"), val = tensor<int32, [4]>([1, 17, 1, 1500])]; |
|
tensor<bool, [4]> var_815_end_mask_0 = const()[name = tensor<string, []>("op_815_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_815_cast_fp16 = slice_by_index(begin = var_815_begin_0, end = var_815_end_0, end_mask = var_815_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_815_cast_fp16")]; |
|
tensor<int32, [4]> var_818_begin_0 = const()[name = tensor<string, []>("op_818_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_818_end_0 = const()[name = tensor<string, []>("op_818_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_818_end_mask_0 = const()[name = tensor<string, []>("op_818_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_818_squeeze_mask_0 = const()[name = tensor<string, []>("op_818_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_818_cast_fp16 = slice_by_index(begin = var_818_begin_0, end = var_818_end_0, end_mask = var_818_end_mask_0, squeeze_mask = var_818_squeeze_mask_0, x = var_815_cast_fp16)[name = tensor<string, []>("op_818_cast_fp16")]; |
|
tensor<int32, [4]> var_833_begin_0 = const()[name = tensor<string, []>("op_833_begin_0"), val = tensor<int32, [4]>([0, 17, 0, 0])]; |
|
tensor<int32, [4]> var_833_end_0 = const()[name = tensor<string, []>("op_833_end_0"), val = tensor<int32, [4]>([1, 18, 1, 1500])]; |
|
tensor<bool, [4]> var_833_end_mask_0 = const()[name = tensor<string, []>("op_833_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_833_cast_fp16 = slice_by_index(begin = var_833_begin_0, end = var_833_end_0, end_mask = var_833_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_833_cast_fp16")]; |
|
tensor<int32, [4]> var_836_begin_0 = const()[name = tensor<string, []>("op_836_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_836_end_0 = const()[name = tensor<string, []>("op_836_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_836_end_mask_0 = const()[name = tensor<string, []>("op_836_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_836_squeeze_mask_0 = const()[name = tensor<string, []>("op_836_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_836_cast_fp16 = slice_by_index(begin = var_836_begin_0, end = var_836_end_0, end_mask = var_836_end_mask_0, squeeze_mask = var_836_squeeze_mask_0, x = var_833_cast_fp16)[name = tensor<string, []>("op_836_cast_fp16")]; |
|
tensor<int32, [4]> var_851_begin_0 = const()[name = tensor<string, []>("op_851_begin_0"), val = tensor<int32, [4]>([0, 18, 0, 0])]; |
|
tensor<int32, [4]> var_851_end_0 = const()[name = tensor<string, []>("op_851_end_0"), val = tensor<int32, [4]>([1, 19, 1, 1500])]; |
|
tensor<bool, [4]> var_851_end_mask_0 = const()[name = tensor<string, []>("op_851_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_851_cast_fp16 = slice_by_index(begin = var_851_begin_0, end = var_851_end_0, end_mask = var_851_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_851_cast_fp16")]; |
|
tensor<int32, [4]> var_854_begin_0 = const()[name = tensor<string, []>("op_854_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_854_end_0 = const()[name = tensor<string, []>("op_854_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_854_end_mask_0 = const()[name = tensor<string, []>("op_854_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_854_squeeze_mask_0 = const()[name = tensor<string, []>("op_854_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_854_cast_fp16 = slice_by_index(begin = var_854_begin_0, end = var_854_end_0, end_mask = var_854_end_mask_0, squeeze_mask = var_854_squeeze_mask_0, x = var_851_cast_fp16)[name = tensor<string, []>("op_854_cast_fp16")]; |
|
tensor<int32, [4]> var_869_begin_0 = const()[name = tensor<string, []>("op_869_begin_0"), val = tensor<int32, [4]>([0, 19, 0, 0])]; |
|
tensor<int32, [4]> var_869_end_0 = const()[name = tensor<string, []>("op_869_end_0"), val = tensor<int32, [4]>([1, 20, 1, 1500])]; |
|
tensor<bool, [4]> var_869_end_mask_0 = const()[name = tensor<string, []>("op_869_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1500]> var_869_cast_fp16 = slice_by_index(begin = var_869_begin_0, end = var_869_end_0, end_mask = var_869_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_869_cast_fp16")]; |
|
tensor<int32, [4]> var_872_begin_0 = const()[name = tensor<string, []>("op_872_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_872_end_0 = const()[name = tensor<string, []>("op_872_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])]; |
|
tensor<bool, [4]> var_872_end_mask_0 = const()[name = tensor<string, []>("op_872_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_872_squeeze_mask_0 = const()[name = tensor<string, []>("op_872_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1500]> var_872_cast_fp16 = slice_by_index(begin = var_872_begin_0, end = var_872_end_0, end_mask = var_872_end_mask_0, squeeze_mask = var_872_squeeze_mask_0, x = var_869_cast_fp16)[name = tensor<string, []>("op_872_cast_fp16")]; |
|
tensor<int32, []> var_879 = const()[name = tensor<string, []>("op_879"), val = tensor<int32, []>(1)]; |
|
tensor<bool, []> var_880_interleave_0 = const()[name = tensor<string, []>("op_880_interleave_0"), val = tensor<bool, []>(false)]; |
|
tensor<fp16, [1, 20, 1500]> var_880_cast_fp16 = concat(axis = var_879, interleave = var_880_interleave_0, values = (var_530_cast_fp16, var_548_cast_fp16, var_566_cast_fp16, var_584_cast_fp16, var_602_cast_fp16, var_620_cast_fp16, var_638_cast_fp16, var_656_cast_fp16, var_674_cast_fp16, var_692_cast_fp16, var_710_cast_fp16, var_728_cast_fp16, var_746_cast_fp16, var_764_cast_fp16, var_782_cast_fp16, var_800_cast_fp16, var_818_cast_fp16, var_836_cast_fp16, var_854_cast_fp16, var_872_cast_fp16))[name = tensor<string, []>("op_880_cast_fp16")]; |
|
tensor<bool, []> var_883 = const()[name = tensor<string, []>("op_883"), val = tensor<bool, []>(false)]; |
|
tensor<int32, [1]> obj_axes_0 = const()[name = tensor<string, []>("obj_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1500]> alignment_heads_weights = reduce_mean(axes = obj_axes_0, keep_dims = var_883, x = var_880_cast_fp16)[name = tensor<string, []>("obj_cast_fp16")]; |
|
} -> (logits, key_cache_updates, value_cache_updates, alignment_heads_weights); |
|
} |
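
Note for readers tracing this machine-generated listing by hand: the span from op_452 through hidden_states_cast_fp16 is the layers_1 (second and last) decoder layer's feed-forward block followed by the final pre-logits normalization. layer_norm carries no learned parameters here; the batch_norm that follows supplies gamma and beta (its mean and variance inputs, obj_1_mean_0_to_fp16 and obj_1_variance_0_to_fp16, are assumed to be the constant 0/1 vectors declared earlier in the file), so the pair behaves as an ordinary affine LayerNorm. fc1 and fc2 are 1x1 convolutions over a [1, C, 1, 1] tensor, which reduces to a plain matrix multiply. Below is a minimal NumPy sketch of that computation; it is not part of the compiled program, every array name is a stand-in, and the zero/one placeholders only mark shapes, since the real values live in weight.bin at the offsets given in the const() declarations.

    import numpy as np
    from math import erf, sqrt

    d_model, d_ff = 1280, 5120
    eps = float.fromhex("0x1.5p-17")                       # the epsilon constant above, ~1.0e-5
    residual = np.zeros((1, d_model), dtype=np.float32)    # stand-in for inputs_11_cast_fp16, flattened to [1, 1280]
    ln1_gamma = np.ones(d_model, dtype=np.float32)         # input_15_gamma_0_to_fp16
    ln1_beta = np.zeros(d_model, dtype=np.float32)         # input_15_beta_0_to_fp16
    fc1_w = np.zeros((d_ff, d_model), dtype=np.float32)    # layers_1_fc1_weight_to_fp16 (1x1 conv kernel == weight matrix)
    fc1_b = np.zeros(d_ff, dtype=np.float32)               # layers_1_fc1_bias_to_fp16
    fc2_w = np.zeros((d_model, d_ff), dtype=np.float32)    # layers_1_fc2_weight_to_fp16
    fc2_b = np.zeros(d_model, dtype=np.float32)            # layers_1_fc2_bias_to_fp16
    ln2_gamma = np.ones(d_model, dtype=np.float32)         # hidden_states_gamma_0_to_fp16
    ln2_beta = np.zeros(d_model, dtype=np.float32)         # hidden_states_beta_0_to_fp16

    def affine_layer_norm(x, gamma, beta):
        # layer_norm over channels without parameters, then the batch_norm that
        # applies gamma/beta (its mean/variance constants are assumed to be 0/1).
        mu = x.mean(axis=-1, keepdims=True)
        var = x.var(axis=-1, keepdims=True)
        return (x - mu) / np.sqrt(var + eps) * gamma + beta

    gelu_exact = np.vectorize(lambda v: 0.5 * v * (1.0 + erf(v / sqrt(2.0))))

    x = affine_layer_norm(residual, ln1_gamma, ln1_beta)   # out_11 -> input_15
    x = x @ fc1_w.T + fc1_b                                # input_17: fc1 as a 1x1 conv
    x = gelu_exact(x)                                      # input: gelu(mode="EXACT")
    x = x @ fc2_w.T + fc2_b                                # hidden_states_5: fc2 as a 1x1 conv
    x = residual + x                                       # inputs: residual connection
    hidden = affine_layer_norm(x, ln2_gamma, ln2_beta)     # out -> hidden_states (final norm)

The compiled program performs the same arithmetic in fp16; float32 is used here only to keep the sketch simple.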
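The rest of the function assembles the four returned outputs: logits reuses embed_tokens_weight_to_fp16 as the projection matrix (tied input/output embeddings), key_cache_updates and value_cache_updates concatenate the two decoder layers' per-token key/value vectors along the channel axis, and alignment_heads_weights averages the 20 heads of the cross-attention weight tensor obj_27_cast_fp16 ([1, 20, 1, 1500]) after the long run of per-head slice_by_index ops. A second hedged NumPy sketch, with the same caveats as above:

    import numpy as np

    d_model, vocab, n_heads, n_frames = 1280, 51866, 20, 1500
    hidden = np.zeros((1, d_model, 1, 1), dtype=np.float32)             # hidden_states_cast_fp16
    W_embed = np.zeros((vocab, d_model), dtype=np.float32)              # embed_tokens_weight_to_fp16 (tied)
    b_logits = np.zeros(vocab, dtype=np.float32)                        # linear_0_bias_0_to_fp16
    cross_attn = np.zeros((1, n_heads, 1, n_frames), dtype=np.float32)  # obj_27_cast_fp16
    current_k = [np.zeros((1, d_model, 1, 1), dtype=np.float32)] * 2    # current_key_1 / current_key
    current_v = [np.zeros((1, d_model, 1, 1), dtype=np.float32)] * 2    # current_value_1 / current_value

    # op_506, op_509, linear_0: drop the width axis, move channels last, and
    # project with the tied embedding matrix -> logits of shape [1, 1, 51866]
    x = hidden.squeeze(2).transpose(0, 2, 1)
    logits = x @ W_embed.T + b_logits

    # obj_31 / obj_33: the two layers' new key/value vectors for the current
    # token, stacked along the channel axis -> [1, 2560, 1, 1]
    key_cache_updates = np.concatenate(current_k, axis=1)
    value_cache_updates = np.concatenate(current_v, axis=1)

    # op_527 .. op_880, obj: slice each of the 20 cross-attention heads, drop the
    # singleton query axis, re-stack, and average over heads -> [1, 1500]
    heads = [cross_attn[:, h:h + 1, 0, :] for h in range(n_heads)]
    alignment_heads_weights = np.concatenate(heads, axis=1).mean(axis=1)

Because the twenty slices together cover every head, the unrolled slice/concat/mean sequence is numerically equivalent to squeezing obj_27_cast_fp16 on its query axis and taking the mean over the head axis; the expanded form is simply what the converter emitted.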