diff --git "a/split_einsum_v2/compiled/TextEncoder.mlmodelc/model.mil" "b/split_einsum_v2/compiled/TextEncoder.mlmodelc/model.mil" new file mode 100644--- /dev/null +++ "b/split_einsum_v2/compiled/TextEncoder.mlmodelc/model.mil" @@ -0,0 +1,1642 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}})] +{ + func main(tensor input_ids) { + tensor var_5 = const()[name = tensor("op_5"), val = tensor(-1)]; + tensor var_6 = const()[name = tensor("op_6"), val = tensor(false)]; + tensor cast_1_dtype_0 = const()[name = tensor("cast_1_dtype_0"), val = tensor("int32")]; + tensor inputs_embeds_axis_0 = const()[name = tensor("inputs_embeds_axis_0"), val = tensor(0)]; + tensor inputs_embeds_batch_dims_0 = const()[name = tensor("inputs_embeds_batch_dims_0"), val = tensor(0)]; + tensor text_encoder_text_model_embeddings_token_embedding_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_embeddings_token_embedding_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor cast_2 = cast(dtype = cast_1_dtype_0, x = input_ids)[name = tensor("cast_2")]; + tensor inputs_embeds_cast = gather(axis = inputs_embeds_axis_0, batch_dims = inputs_embeds_batch_dims_0, indices = cast_2, x = text_encoder_text_model_embeddings_token_embedding_weight_to_fp16)[name = tensor("inputs_embeds_cast")]; + tensor position_embeddings_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101187712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101227200))), name = tensor("position_embeddings_to_fp16_palettized"), shape = tensor([1, 77, 1024])]; + tensor input_3_cast = add(x = inputs_embeds_cast, y = position_embeddings_to_fp16_palettized)[name = tensor("input_3_cast")]; + tensor hidden_states_1_axes_0 = const()[name = tensor("hidden_states_1_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101227328)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101229440)))]; + tensor var_15_to_fp16 = const()[name = tensor("op_15_to_fp16"), val = tensor(0x1.5p-17)]; + tensor hidden_states_1_cast = layer_norm(axes = hidden_states_1_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16, x = input_3_cast)[name = tensor("hidden_states_1_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101231552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101755904))), name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101756032)))]; + tensor linear_0_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_1_cast)[name = tensor("linear_0_cast")]; + tensor var_130_to_fp16 = const()[name = tensor("op_130_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_5_cast = mul(x = linear_0_cast, y = var_130_to_fp16)[name = tensor("tensor_5_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101758144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102282496))), name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102282624)))]; + tensor linear_1_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_1_cast)[name = tensor("linear_1_cast")]; + tensor var_135 = const()[name = tensor("op_135"), val = tensor([1, -1, 16, 64])]; + tensor var_136_cast = reshape(shape = var_135, x = linear_1_cast)[name = tensor("op_136_cast")]; + tensor var_137_perm_0 = const()[name = tensor("op_137_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102284736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102809088))), name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102809216)))]; + tensor linear_2_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_1_cast)[name = tensor("linear_2_cast")]; + tensor var_142 = const()[name = tensor("op_142"), val = tensor([1, -1, 16, 64])]; + tensor var_143_cast = reshape(shape = var_142, x = linear_2_cast)[name = tensor("op_143_cast")]; + tensor var_144_perm_0 = const()[name = tensor("op_144_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_151 = const()[name = tensor("op_151"), val = tensor([1, 77, 16, 64])]; + tensor var_152_cast = reshape(shape = var_151, x = tensor_5_cast)[name = tensor("op_152_cast")]; + tensor var_153_perm_0 = const()[name = tensor("op_153_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_155 = const()[name = tensor("op_155"), val = tensor([16, 
-1, 64])]; + tensor transpose_114 = transpose(perm = var_153_perm_0, x = var_152_cast)[name = tensor("transpose_114")]; + tensor query_states_1_cast = reshape(shape = var_155, x = transpose_114)[name = tensor("query_states_1_cast")]; + tensor var_157 = const()[name = tensor("op_157"), val = tensor([16, -1, 64])]; + tensor transpose_113 = transpose(perm = var_137_perm_0, x = var_136_cast)[name = tensor("transpose_113")]; + tensor key_states_3_cast = reshape(shape = var_157, x = transpose_113)[name = tensor("key_states_3_cast")]; + tensor var_159 = const()[name = tensor("op_159"), val = tensor([16, -1, 64])]; + tensor transpose_112 = transpose(perm = var_144_perm_0, x = var_143_cast)[name = tensor("transpose_112")]; + tensor value_states_3_cast = reshape(shape = var_159, x = transpose_112)[name = tensor("value_states_3_cast")]; + tensor var_162_perm_0 = const()[name = tensor("op_162_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor transpose_111 = transpose(perm = var_162_perm_0, x = key_states_3_cast)[name = tensor("transpose_111")]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = query_states_1_cast, y = transpose_111)[name = tensor("attn_weights_1_cast")]; + tensor var_164 = const()[name = tensor("op_164"), val = tensor([1, 16, 77, 77])]; + tensor var_165_cast = reshape(shape = var_164, x = attn_weights_1_cast)[name = tensor("op_165_cast")]; + tensor op_57_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102811328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102814400))), name = tensor("op_57_to_fp16_palettized"), shape = tensor([1, 1, 77, 77])]; + tensor attn_weights_3_cast = add(x = var_165_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_3_cast")]; + tensor var_170 = const()[name = tensor("op_170"), val = tensor([16, 77, 77])]; + tensor input_5_cast = reshape(shape = var_170, x = attn_weights_3_cast)[name = tensor("input_5_cast")]; + tensor input_7_cast = softmax(axis = var_5, x = input_5_cast)[name = tensor("input_7_cast")]; + tensor attn_output_1_transpose_x_0 = const()[name = tensor("attn_output_1_transpose_x_0"), val = tensor(false)]; + tensor attn_output_1_transpose_y_0 = const()[name = tensor("attn_output_1_transpose_y_0"), val = tensor(false)]; + tensor attn_output_1_cast = matmul(transpose_x = attn_output_1_transpose_x_0, transpose_y = attn_output_1_transpose_y_0, x = input_7_cast, y = value_states_3_cast)[name = tensor("attn_output_1_cast")]; + tensor var_175 = const()[name = tensor("op_175"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_3_cast = reshape(shape = var_175, x = attn_output_1_cast)[name = tensor("attn_output_3_cast")]; + tensor attn_output_5_perm_0 = const()[name = tensor("attn_output_5_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_178 = const()[name = tensor("op_178"), val = tensor([1, 77, 1024])]; + tensor transpose_110 = transpose(perm = attn_output_5_perm_0, x = attn_output_3_cast)[name = tensor("transpose_110")]; + tensor input_9_cast = reshape(shape = var_178, x = transpose_110)[name = tensor("input_9_cast")]; + tensor 
text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102814528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103338880))), name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103339008)))]; + tensor linear_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16_palettized, x = input_9_cast)[name = tensor("linear_3_cast")]; + tensor input_11_cast = add(x = input_3_cast, y = linear_3_cast)[name = tensor("input_11_cast")]; + tensor input_13_axes_0 = const()[name = tensor("input_13_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103341120)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103343232)))]; + tensor input_13_cast = layer_norm(axes = input_13_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16, x = input_11_cast)[name = tensor("input_13_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103345344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105442560))), name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105442688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105444800))), name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_4_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16_palettized, x = input_13_cast)[name = tensor("linear_4_cast")]; + tensor input_17_mode_0 = const()[name = tensor("input_17_mode_0"), val = tensor("EXACT")]; + tensor input_17_cast = gelu(mode = input_17_mode_0, x = linear_4_cast)[name = tensor("input_17_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(105444928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(107542144))), name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(107542272)))]; + tensor linear_5_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16_palettized, x = input_17_cast)[name = tensor("linear_5_cast")]; + tensor input_19_cast = add(x = input_11_cast, y = linear_5_cast)[name = tensor("input_19_cast")]; + tensor hidden_states_7_axes_0 = const()[name = tensor("hidden_states_7_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(107544384)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(107546496)))]; + tensor hidden_states_7_cast = layer_norm(axes = hidden_states_7_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16, x = input_19_cast)[name = tensor("hidden_states_7_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(107548608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108072960))), name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108073088)))]; + tensor linear_6_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_7_cast)[name = tensor("linear_6_cast")]; + tensor var_217_to_fp16 = const()[name = tensor("op_217_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_11_cast = mul(x = linear_6_cast, y = var_217_to_fp16)[name = tensor("tensor_11_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108075200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108599552))), name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16 = 
const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108599680)))]; + tensor linear_7_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_7_cast)[name = tensor("linear_7_cast")]; + tensor var_222 = const()[name = tensor("op_222"), val = tensor([1, -1, 16, 64])]; + tensor var_223_cast = reshape(shape = var_222, x = linear_7_cast)[name = tensor("op_223_cast")]; + tensor var_224_perm_0 = const()[name = tensor("op_224_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108601792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109126144))), name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109126272)))]; + tensor linear_8_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_7_cast)[name = tensor("linear_8_cast")]; + tensor var_229 = const()[name = tensor("op_229"), val = tensor([1, -1, 16, 64])]; + tensor var_230_cast = reshape(shape = var_229, x = linear_8_cast)[name = tensor("op_230_cast")]; + tensor var_231_perm_0 = const()[name = tensor("op_231_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_238 = const()[name = tensor("op_238"), val = tensor([1, 77, 16, 64])]; + tensor var_239_cast = reshape(shape = var_238, x = tensor_11_cast)[name = tensor("op_239_cast")]; + tensor var_240_perm_0 = const()[name = tensor("op_240_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_242 = const()[name = tensor("op_242"), val = tensor([16, -1, 64])]; + tensor transpose_109 = transpose(perm = var_240_perm_0, x = var_239_cast)[name = tensor("transpose_109")]; + tensor query_states_3_cast = reshape(shape = var_242, x = transpose_109)[name = tensor("query_states_3_cast")]; + tensor var_244 = const()[name = tensor("op_244"), val = tensor([16, -1, 64])]; + tensor transpose_108 = transpose(perm = var_224_perm_0, x = var_223_cast)[name = tensor("transpose_108")]; + tensor key_states_7_cast = reshape(shape = var_244, x = transpose_108)[name = tensor("key_states_7_cast")]; + tensor var_246 = const()[name = tensor("op_246"), val = tensor([16, -1, 64])]; + tensor transpose_107 = transpose(perm = var_231_perm_0, x = var_230_cast)[name = tensor("transpose_107")]; + tensor value_states_7_cast = reshape(shape = var_246, x = transpose_107)[name = tensor("value_states_7_cast")]; + tensor var_249_perm_0 = const()[name = tensor("op_249_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_7_transpose_x_0 = const()[name = tensor("attn_weights_7_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_7_transpose_y_0 = const()[name = tensor("attn_weights_7_transpose_y_0"), val = tensor(false)]; + tensor 
transpose_106 = transpose(perm = var_249_perm_0, x = key_states_7_cast)[name = tensor("transpose_106")]; + tensor attn_weights_7_cast = matmul(transpose_x = attn_weights_7_transpose_x_0, transpose_y = attn_weights_7_transpose_y_0, x = query_states_3_cast, y = transpose_106)[name = tensor("attn_weights_7_cast")]; + tensor var_251 = const()[name = tensor("op_251"), val = tensor([1, 16, 77, 77])]; + tensor var_252_cast = reshape(shape = var_251, x = attn_weights_7_cast)[name = tensor("op_252_cast")]; + tensor attn_weights_9_cast = add(x = var_252_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_9_cast")]; + tensor var_257 = const()[name = tensor("op_257"), val = tensor([16, 77, 77])]; + tensor input_21_cast = reshape(shape = var_257, x = attn_weights_9_cast)[name = tensor("input_21_cast")]; + tensor input_23_cast = softmax(axis = var_5, x = input_21_cast)[name = tensor("input_23_cast")]; + tensor attn_output_7_transpose_x_0 = const()[name = tensor("attn_output_7_transpose_x_0"), val = tensor(false)]; + tensor attn_output_7_transpose_y_0 = const()[name = tensor("attn_output_7_transpose_y_0"), val = tensor(false)]; + tensor attn_output_7_cast = matmul(transpose_x = attn_output_7_transpose_x_0, transpose_y = attn_output_7_transpose_y_0, x = input_23_cast, y = value_states_7_cast)[name = tensor("attn_output_7_cast")]; + tensor var_262 = const()[name = tensor("op_262"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_9_cast = reshape(shape = var_262, x = attn_output_7_cast)[name = tensor("attn_output_9_cast")]; + tensor attn_output_11_perm_0 = const()[name = tensor("attn_output_11_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_265 = const()[name = tensor("op_265"), val = tensor([1, 77, 1024])]; + tensor transpose_105 = transpose(perm = attn_output_11_perm_0, x = attn_output_9_cast)[name = tensor("transpose_105")]; + tensor input_25_cast = reshape(shape = var_265, x = transpose_105)[name = tensor("input_25_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109128384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109652736))), name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109652864)))]; + tensor linear_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16_palettized, x = input_25_cast)[name = tensor("linear_9_cast")]; + tensor input_27_cast = add(x = input_19_cast, y = linear_9_cast)[name = tensor("input_27_cast")]; + tensor input_29_axes_0 = const()[name = tensor("input_29_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109654976)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109657088)))]; + tensor input_29_cast = layer_norm(axes = input_29_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16, x = input_27_cast)[name = tensor("input_29_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109659200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111756416))), name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111756544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111758656))), name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_10_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16_palettized, x = input_29_cast)[name = tensor("linear_10_cast")]; + tensor input_33_mode_0 = const()[name = tensor("input_33_mode_0"), val = tensor("EXACT")]; + tensor input_33_cast = gelu(mode = input_33_mode_0, x = linear_10_cast)[name = tensor("input_33_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111758784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113856000))), name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113856128)))]; + tensor linear_11_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16_palettized, x = input_33_cast)[name = tensor("linear_11_cast")]; + tensor input_35_cast = add(x = input_27_cast, y = linear_11_cast)[name = tensor("input_35_cast")]; + tensor hidden_states_13_axes_0 = const()[name = tensor("hidden_states_13_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113858240)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113860352)))]; + tensor hidden_states_13_cast = layer_norm(axes = 
hidden_states_13_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16, x = input_35_cast)[name = tensor("hidden_states_13_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113862464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114386816))), name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114386944)))]; + tensor linear_12_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_13_cast)[name = tensor("linear_12_cast")]; + tensor var_304_to_fp16 = const()[name = tensor("op_304_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_17_cast = mul(x = linear_12_cast, y = var_304_to_fp16)[name = tensor("tensor_17_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114389056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114913408))), name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114913536)))]; + tensor linear_13_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_13_cast)[name = tensor("linear_13_cast")]; + tensor var_309 = const()[name = tensor("op_309"), val = tensor([1, -1, 16, 64])]; + tensor var_310_cast = reshape(shape = var_309, x = linear_13_cast)[name = tensor("op_310_cast")]; + tensor var_311_perm_0 = const()[name = tensor("op_311_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114915648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115440000))), name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115440128)))]; + tensor linear_14_cast = linear(bias = 
text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_13_cast)[name = tensor("linear_14_cast")]; + tensor var_316 = const()[name = tensor("op_316"), val = tensor([1, -1, 16, 64])]; + tensor var_317_cast = reshape(shape = var_316, x = linear_14_cast)[name = tensor("op_317_cast")]; + tensor var_318_perm_0 = const()[name = tensor("op_318_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_325 = const()[name = tensor("op_325"), val = tensor([1, 77, 16, 64])]; + tensor var_326_cast = reshape(shape = var_325, x = tensor_17_cast)[name = tensor("op_326_cast")]; + tensor var_327_perm_0 = const()[name = tensor("op_327_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_329 = const()[name = tensor("op_329"), val = tensor([16, -1, 64])]; + tensor transpose_104 = transpose(perm = var_327_perm_0, x = var_326_cast)[name = tensor("transpose_104")]; + tensor query_states_5_cast = reshape(shape = var_329, x = transpose_104)[name = tensor("query_states_5_cast")]; + tensor var_331 = const()[name = tensor("op_331"), val = tensor([16, -1, 64])]; + tensor transpose_103 = transpose(perm = var_311_perm_0, x = var_310_cast)[name = tensor("transpose_103")]; + tensor key_states_11_cast = reshape(shape = var_331, x = transpose_103)[name = tensor("key_states_11_cast")]; + tensor var_333 = const()[name = tensor("op_333"), val = tensor([16, -1, 64])]; + tensor transpose_102 = transpose(perm = var_318_perm_0, x = var_317_cast)[name = tensor("transpose_102")]; + tensor value_states_11_cast = reshape(shape = var_333, x = transpose_102)[name = tensor("value_states_11_cast")]; + tensor var_336_perm_0 = const()[name = tensor("op_336_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; + tensor transpose_101 = transpose(perm = var_336_perm_0, x = key_states_11_cast)[name = tensor("transpose_101")]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = query_states_5_cast, y = transpose_101)[name = tensor("attn_weights_13_cast")]; + tensor var_338 = const()[name = tensor("op_338"), val = tensor([1, 16, 77, 77])]; + tensor var_339_cast = reshape(shape = var_338, x = attn_weights_13_cast)[name = tensor("op_339_cast")]; + tensor attn_weights_15_cast = add(x = var_339_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_15_cast")]; + tensor var_344 = const()[name = tensor("op_344"), val = tensor([16, 77, 77])]; + tensor input_37_cast = reshape(shape = var_344, x = attn_weights_15_cast)[name = tensor("input_37_cast")]; + tensor input_39_cast = softmax(axis = var_5, x = input_37_cast)[name = tensor("input_39_cast")]; + tensor attn_output_13_transpose_x_0 = const()[name = tensor("attn_output_13_transpose_x_0"), val = tensor(false)]; + tensor attn_output_13_transpose_y_0 = const()[name = tensor("attn_output_13_transpose_y_0"), val = tensor(false)]; + tensor attn_output_13_cast = matmul(transpose_x = attn_output_13_transpose_x_0, transpose_y = attn_output_13_transpose_y_0, x = input_39_cast, y = value_states_11_cast)[name = tensor("attn_output_13_cast")]; + tensor var_349 = const()[name = tensor("op_349"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_15_cast = reshape(shape = var_349, x = 
attn_output_13_cast)[name = tensor("attn_output_15_cast")]; + tensor attn_output_17_perm_0 = const()[name = tensor("attn_output_17_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_352 = const()[name = tensor("op_352"), val = tensor([1, 77, 1024])]; + tensor transpose_100 = transpose(perm = attn_output_17_perm_0, x = attn_output_15_cast)[name = tensor("transpose_100")]; + tensor input_41_cast = reshape(shape = var_352, x = transpose_100)[name = tensor("input_41_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115442240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115966592))), name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115966720)))]; + tensor linear_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16_palettized, x = input_41_cast)[name = tensor("linear_15_cast")]; + tensor input_43_cast = add(x = input_35_cast, y = linear_15_cast)[name = tensor("input_43_cast")]; + tensor input_45_axes_0 = const()[name = tensor("input_45_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115968832)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115970944)))]; + tensor input_45_cast = layer_norm(axes = input_45_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115973056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118070272))), name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118070400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118072512))), name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_16_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16_palettized, weight = 
text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16_palettized, x = input_45_cast)[name = tensor("linear_16_cast")]; + tensor input_49_mode_0 = const()[name = tensor("input_49_mode_0"), val = tensor("EXACT")]; + tensor input_49_cast = gelu(mode = input_49_mode_0, x = linear_16_cast)[name = tensor("input_49_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118072640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120169856))), name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120169984)))]; + tensor linear_17_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16_palettized, x = input_49_cast)[name = tensor("linear_17_cast")]; + tensor input_51_cast = add(x = input_43_cast, y = linear_17_cast)[name = tensor("input_51_cast")]; + tensor hidden_states_19_axes_0 = const()[name = tensor("hidden_states_19_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120172096)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120174208)))]; + tensor hidden_states_19_cast = layer_norm(axes = hidden_states_19_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16, x = input_51_cast)[name = tensor("hidden_states_19_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120176320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120700672))), name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120700800)))]; + tensor linear_18_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_19_cast)[name = tensor("linear_18_cast")]; + tensor var_391_to_fp16 = const()[name = tensor("op_391_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_23_cast = mul(x = linear_18_cast, y = var_391_to_fp16)[name = tensor("tensor_23_cast")]; + tensor 
text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120702912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121227264))), name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121227392)))]; + tensor linear_19_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_19_cast)[name = tensor("linear_19_cast")]; + tensor var_396 = const()[name = tensor("op_396"), val = tensor([1, -1, 16, 64])]; + tensor var_397_cast = reshape(shape = var_396, x = linear_19_cast)[name = tensor("op_397_cast")]; + tensor var_398_perm_0 = const()[name = tensor("op_398_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121229504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121753856))), name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121753984)))]; + tensor linear_20_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_19_cast)[name = tensor("linear_20_cast")]; + tensor var_403 = const()[name = tensor("op_403"), val = tensor([1, -1, 16, 64])]; + tensor var_404_cast = reshape(shape = var_403, x = linear_20_cast)[name = tensor("op_404_cast")]; + tensor var_405_perm_0 = const()[name = tensor("op_405_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_412 = const()[name = tensor("op_412"), val = tensor([1, 77, 16, 64])]; + tensor var_413_cast = reshape(shape = var_412, x = tensor_23_cast)[name = tensor("op_413_cast")]; + tensor var_414_perm_0 = const()[name = tensor("op_414_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_416 = const()[name = tensor("op_416"), val = tensor([16, -1, 64])]; + tensor transpose_99 = transpose(perm = var_414_perm_0, x = var_413_cast)[name = tensor("transpose_99")]; + tensor query_states_7_cast = reshape(shape = var_416, x = transpose_99)[name = tensor("query_states_7_cast")]; + tensor var_418 = const()[name = tensor("op_418"), val = tensor([16, -1, 64])]; + tensor transpose_98 = transpose(perm = var_398_perm_0, x = var_397_cast)[name = tensor("transpose_98")]; + tensor key_states_15_cast = reshape(shape = var_418, x = transpose_98)[name = tensor("key_states_15_cast")]; + tensor var_420 = const()[name = tensor("op_420"), val = tensor([16, -1, 64])]; + tensor transpose_97 = 
transpose(perm = var_405_perm_0, x = var_404_cast)[name = tensor("transpose_97")]; + tensor value_states_15_cast = reshape(shape = var_420, x = transpose_97)[name = tensor("value_states_15_cast")]; + tensor var_423_perm_0 = const()[name = tensor("op_423_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_19_transpose_x_0 = const()[name = tensor("attn_weights_19_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_19_transpose_y_0 = const()[name = tensor("attn_weights_19_transpose_y_0"), val = tensor(false)]; + tensor transpose_96 = transpose(perm = var_423_perm_0, x = key_states_15_cast)[name = tensor("transpose_96")]; + tensor attn_weights_19_cast = matmul(transpose_x = attn_weights_19_transpose_x_0, transpose_y = attn_weights_19_transpose_y_0, x = query_states_7_cast, y = transpose_96)[name = tensor("attn_weights_19_cast")]; + tensor var_425 = const()[name = tensor("op_425"), val = tensor([1, 16, 77, 77])]; + tensor var_426_cast = reshape(shape = var_425, x = attn_weights_19_cast)[name = tensor("op_426_cast")]; + tensor attn_weights_21_cast = add(x = var_426_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_21_cast")]; + tensor var_431 = const()[name = tensor("op_431"), val = tensor([16, 77, 77])]; + tensor input_53_cast = reshape(shape = var_431, x = attn_weights_21_cast)[name = tensor("input_53_cast")]; + tensor input_55_cast = softmax(axis = var_5, x = input_53_cast)[name = tensor("input_55_cast")]; + tensor attn_output_19_transpose_x_0 = const()[name = tensor("attn_output_19_transpose_x_0"), val = tensor(false)]; + tensor attn_output_19_transpose_y_0 = const()[name = tensor("attn_output_19_transpose_y_0"), val = tensor(false)]; + tensor attn_output_19_cast = matmul(transpose_x = attn_output_19_transpose_x_0, transpose_y = attn_output_19_transpose_y_0, x = input_55_cast, y = value_states_15_cast)[name = tensor("attn_output_19_cast")]; + tensor var_436 = const()[name = tensor("op_436"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_21_cast = reshape(shape = var_436, x = attn_output_19_cast)[name = tensor("attn_output_21_cast")]; + tensor attn_output_23_perm_0 = const()[name = tensor("attn_output_23_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_439 = const()[name = tensor("op_439"), val = tensor([1, 77, 1024])]; + tensor transpose_95 = transpose(perm = attn_output_23_perm_0, x = attn_output_21_cast)[name = tensor("transpose_95")]; + tensor input_57_cast = reshape(shape = var_439, x = transpose_95)[name = tensor("input_57_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(121756096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122280448))), name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122280576)))]; + tensor linear_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16_palettized, x = input_57_cast)[name = tensor("linear_21_cast")]; + tensor input_59_cast = 
add(x = input_51_cast, y = linear_21_cast)[name = tensor("input_59_cast")]; + tensor input_61_axes_0 = const()[name = tensor("input_61_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122282688)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122284800)))]; + tensor input_61_cast = layer_norm(axes = input_61_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16, x = input_59_cast)[name = tensor("input_61_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122286912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124384128))), name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124384256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124386368))), name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_22_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16_palettized, x = input_61_cast)[name = tensor("linear_22_cast")]; + tensor input_65_mode_0 = const()[name = tensor("input_65_mode_0"), val = tensor("EXACT")]; + tensor input_65_cast = gelu(mode = input_65_mode_0, x = linear_22_cast)[name = tensor("input_65_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124386496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126483712))), name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126483840)))]; + tensor linear_23_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16_palettized, x = input_65_cast)[name = tensor("linear_23_cast")]; + tensor input_67_cast = add(x = input_59_cast, y = linear_23_cast)[name = tensor("input_67_cast")]; + tensor hidden_states_25_axes_0 = const()[name = tensor("hidden_states_25_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16 
= const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126485952)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126488064)))]; + tensor hidden_states_25_cast = layer_norm(axes = hidden_states_25_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16, x = input_67_cast)[name = tensor("hidden_states_25_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126490176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127014528))), name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127014656)))]; + tensor linear_24_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_25_cast)[name = tensor("linear_24_cast")]; + tensor var_478_to_fp16 = const()[name = tensor("op_478_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_29_cast = mul(x = linear_24_cast, y = var_478_to_fp16)[name = tensor("tensor_29_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127016768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127541120))), name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127541248)))]; + tensor linear_25_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_25_cast)[name = tensor("linear_25_cast")]; + tensor var_483 = const()[name = tensor("op_483"), val = tensor([1, -1, 16, 64])]; + tensor var_484_cast = reshape(shape = var_483, x = linear_25_cast)[name = tensor("op_484_cast")]; + tensor var_485_perm_0 = const()[name = tensor("op_485_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127543360))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(128067712))), name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128067840)))]; + tensor linear_26_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_25_cast)[name = tensor("linear_26_cast")]; + tensor var_490 = const()[name = tensor("op_490"), val = tensor([1, -1, 16, 64])]; + tensor var_491_cast = reshape(shape = var_490, x = linear_26_cast)[name = tensor("op_491_cast")]; + tensor var_492_perm_0 = const()[name = tensor("op_492_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_499 = const()[name = tensor("op_499"), val = tensor([1, 77, 16, 64])]; + tensor var_500_cast = reshape(shape = var_499, x = tensor_29_cast)[name = tensor("op_500_cast")]; + tensor var_501_perm_0 = const()[name = tensor("op_501_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_503 = const()[name = tensor("op_503"), val = tensor([16, -1, 64])]; + tensor transpose_94 = transpose(perm = var_501_perm_0, x = var_500_cast)[name = tensor("transpose_94")]; + tensor query_states_9_cast = reshape(shape = var_503, x = transpose_94)[name = tensor("query_states_9_cast")]; + tensor var_505 = const()[name = tensor("op_505"), val = tensor([16, -1, 64])]; + tensor transpose_93 = transpose(perm = var_485_perm_0, x = var_484_cast)[name = tensor("transpose_93")]; + tensor key_states_19_cast = reshape(shape = var_505, x = transpose_93)[name = tensor("key_states_19_cast")]; + tensor var_507 = const()[name = tensor("op_507"), val = tensor([16, -1, 64])]; + tensor transpose_92 = transpose(perm = var_492_perm_0, x = var_491_cast)[name = tensor("transpose_92")]; + tensor value_states_19_cast = reshape(shape = var_507, x = transpose_92)[name = tensor("value_states_19_cast")]; + tensor var_510_perm_0 = const()[name = tensor("op_510_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor transpose_91 = transpose(perm = var_510_perm_0, x = key_states_19_cast)[name = tensor("transpose_91")]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = query_states_9_cast, y = transpose_91)[name = tensor("attn_weights_25_cast")]; + tensor var_512 = const()[name = tensor("op_512"), val = tensor([1, 16, 77, 77])]; + tensor var_513_cast = reshape(shape = var_512, x = attn_weights_25_cast)[name = tensor("op_513_cast")]; + tensor attn_weights_27_cast = add(x = var_513_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_27_cast")]; + tensor var_518 = const()[name = tensor("op_518"), val = tensor([16, 77, 77])]; + tensor input_69_cast = reshape(shape = var_518, x = attn_weights_27_cast)[name = tensor("input_69_cast")]; + tensor input_71_cast = softmax(axis = var_5, x = input_69_cast)[name = tensor("input_71_cast")]; + tensor attn_output_25_transpose_x_0 = const()[name = 
tensor("attn_output_25_transpose_x_0"), val = tensor(false)]; + tensor attn_output_25_transpose_y_0 = const()[name = tensor("attn_output_25_transpose_y_0"), val = tensor(false)]; + tensor attn_output_25_cast = matmul(transpose_x = attn_output_25_transpose_x_0, transpose_y = attn_output_25_transpose_y_0, x = input_71_cast, y = value_states_19_cast)[name = tensor("attn_output_25_cast")]; + tensor var_523 = const()[name = tensor("op_523"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_27_cast = reshape(shape = var_523, x = attn_output_25_cast)[name = tensor("attn_output_27_cast")]; + tensor attn_output_29_perm_0 = const()[name = tensor("attn_output_29_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_526 = const()[name = tensor("op_526"), val = tensor([1, 77, 1024])]; + tensor transpose_90 = transpose(perm = attn_output_29_perm_0, x = attn_output_27_cast)[name = tensor("transpose_90")]; + tensor input_73_cast = reshape(shape = var_526, x = transpose_90)[name = tensor("input_73_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128069952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128594304))), name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128594432)))]; + tensor linear_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16_palettized, x = input_73_cast)[name = tensor("linear_27_cast")]; + tensor input_75_cast = add(x = input_67_cast, y = linear_27_cast)[name = tensor("input_75_cast")]; + tensor input_77_axes_0 = const()[name = tensor("input_77_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128596544)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128598656)))]; + tensor input_77_cast = layer_norm(axes = input_77_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16, x = input_75_cast)[name = tensor("input_77_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128600768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130697984))), name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor 
text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130698112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130700224))), name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_28_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16_palettized, x = input_77_cast)[name = tensor("linear_28_cast")]; + tensor input_81_mode_0 = const()[name = tensor("input_81_mode_0"), val = tensor("EXACT")]; + tensor input_81_cast = gelu(mode = input_81_mode_0, x = linear_28_cast)[name = tensor("input_81_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130700352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132797568))), name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132797696)))]; + tensor linear_29_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16_palettized, x = input_81_cast)[name = tensor("linear_29_cast")]; + tensor input_83_cast = add(x = input_75_cast, y = linear_29_cast)[name = tensor("input_83_cast")]; + tensor hidden_states_31_axes_0 = const()[name = tensor("hidden_states_31_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132799808)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132801920)))]; + tensor hidden_states_31_cast = layer_norm(axes = hidden_states_31_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16, x = input_83_cast)[name = tensor("hidden_states_31_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132804032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133328384))), name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(133328512)))]; + tensor linear_30_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_31_cast)[name = tensor("linear_30_cast")]; + tensor var_565_to_fp16 = const()[name = tensor("op_565_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_35_cast = mul(x = linear_30_cast, y = var_565_to_fp16)[name = tensor("tensor_35_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133330624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133854976))), name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133855104)))]; + tensor linear_31_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_31_cast)[name = tensor("linear_31_cast")]; + tensor var_570 = const()[name = tensor("op_570"), val = tensor([1, -1, 16, 64])]; + tensor var_571_cast = reshape(shape = var_570, x = linear_31_cast)[name = tensor("op_571_cast")]; + tensor var_572_perm_0 = const()[name = tensor("op_572_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133857216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134381568))), name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134381696)))]; + tensor linear_32_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_31_cast)[name = tensor("linear_32_cast")]; + tensor var_577 = const()[name = tensor("op_577"), val = tensor([1, -1, 16, 64])]; + tensor var_578_cast = reshape(shape = var_577, x = linear_32_cast)[name = tensor("op_578_cast")]; + tensor var_579_perm_0 = const()[name = tensor("op_579_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_586 = const()[name = tensor("op_586"), val = tensor([1, 77, 16, 64])]; + tensor var_587_cast = reshape(shape = var_586, x = tensor_35_cast)[name = tensor("op_587_cast")]; + tensor var_588_perm_0 = const()[name = tensor("op_588_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_590 = const()[name = tensor("op_590"), val = tensor([16, -1, 64])]; + tensor transpose_89 = transpose(perm = var_588_perm_0, x = var_587_cast)[name = 
tensor("transpose_89")]; + tensor query_states_11_cast = reshape(shape = var_590, x = transpose_89)[name = tensor("query_states_11_cast")]; + tensor var_592 = const()[name = tensor("op_592"), val = tensor([16, -1, 64])]; + tensor transpose_88 = transpose(perm = var_572_perm_0, x = var_571_cast)[name = tensor("transpose_88")]; + tensor key_states_23_cast = reshape(shape = var_592, x = transpose_88)[name = tensor("key_states_23_cast")]; + tensor var_594 = const()[name = tensor("op_594"), val = tensor([16, -1, 64])]; + tensor transpose_87 = transpose(perm = var_579_perm_0, x = var_578_cast)[name = tensor("transpose_87")]; + tensor value_states_23_cast = reshape(shape = var_594, x = transpose_87)[name = tensor("value_states_23_cast")]; + tensor var_597_perm_0 = const()[name = tensor("op_597_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_31_transpose_x_0 = const()[name = tensor("attn_weights_31_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_31_transpose_y_0 = const()[name = tensor("attn_weights_31_transpose_y_0"), val = tensor(false)]; + tensor transpose_86 = transpose(perm = var_597_perm_0, x = key_states_23_cast)[name = tensor("transpose_86")]; + tensor attn_weights_31_cast = matmul(transpose_x = attn_weights_31_transpose_x_0, transpose_y = attn_weights_31_transpose_y_0, x = query_states_11_cast, y = transpose_86)[name = tensor("attn_weights_31_cast")]; + tensor var_599 = const()[name = tensor("op_599"), val = tensor([1, 16, 77, 77])]; + tensor var_600_cast = reshape(shape = var_599, x = attn_weights_31_cast)[name = tensor("op_600_cast")]; + tensor attn_weights_33_cast = add(x = var_600_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_33_cast")]; + tensor var_605 = const()[name = tensor("op_605"), val = tensor([16, 77, 77])]; + tensor input_85_cast = reshape(shape = var_605, x = attn_weights_33_cast)[name = tensor("input_85_cast")]; + tensor input_87_cast = softmax(axis = var_5, x = input_85_cast)[name = tensor("input_87_cast")]; + tensor attn_output_31_transpose_x_0 = const()[name = tensor("attn_output_31_transpose_x_0"), val = tensor(false)]; + tensor attn_output_31_transpose_y_0 = const()[name = tensor("attn_output_31_transpose_y_0"), val = tensor(false)]; + tensor attn_output_31_cast = matmul(transpose_x = attn_output_31_transpose_x_0, transpose_y = attn_output_31_transpose_y_0, x = input_87_cast, y = value_states_23_cast)[name = tensor("attn_output_31_cast")]; + tensor var_610 = const()[name = tensor("op_610"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_33_cast = reshape(shape = var_610, x = attn_output_31_cast)[name = tensor("attn_output_33_cast")]; + tensor attn_output_35_perm_0 = const()[name = tensor("attn_output_35_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_613 = const()[name = tensor("op_613"), val = tensor([1, 77, 1024])]; + tensor transpose_85 = transpose(perm = attn_output_35_perm_0, x = attn_output_33_cast)[name = tensor("transpose_85")]; + tensor input_89_cast = reshape(shape = var_613, x = transpose_85)[name = tensor("input_89_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134383808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134908160))), name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor 
text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134908288)))]; + tensor linear_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16_palettized, x = input_89_cast)[name = tensor("linear_33_cast")]; + tensor input_91_cast = add(x = input_83_cast, y = linear_33_cast)[name = tensor("input_91_cast")]; + tensor input_93_axes_0 = const()[name = tensor("input_93_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134910400)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134912512)))]; + tensor input_93_cast = layer_norm(axes = input_93_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16, x = input_91_cast)[name = tensor("input_93_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(134914624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137011840))), name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137011968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137014080))), name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_34_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16_palettized, x = input_93_cast)[name = tensor("linear_34_cast")]; + tensor input_97_mode_0 = const()[name = tensor("input_97_mode_0"), val = tensor("EXACT")]; + tensor input_97_cast = gelu(mode = input_97_mode_0, x = linear_34_cast)[name = tensor("input_97_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137014208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139111424))), name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(139111552)))]; + tensor linear_35_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16_palettized, x = input_97_cast)[name = tensor("linear_35_cast")]; + tensor input_99_cast = add(x = input_91_cast, y = linear_35_cast)[name = tensor("input_99_cast")]; + tensor hidden_states_37_axes_0 = const()[name = tensor("hidden_states_37_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139113664)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139115776)))]; + tensor hidden_states_37_cast = layer_norm(axes = hidden_states_37_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16, x = input_99_cast)[name = tensor("hidden_states_37_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139117888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139642240))), name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139642368)))]; + tensor linear_36_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_37_cast)[name = tensor("linear_36_cast")]; + tensor var_652_to_fp16 = const()[name = tensor("op_652_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_41_cast = mul(x = linear_36_cast, y = var_652_to_fp16)[name = tensor("tensor_41_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139644480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140168832))), name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140168960)))]; + tensor linear_37_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16_palettized, x = 
hidden_states_37_cast)[name = tensor("linear_37_cast")]; + tensor var_657 = const()[name = tensor("op_657"), val = tensor([1, -1, 16, 64])]; + tensor var_658_cast = reshape(shape = var_657, x = linear_37_cast)[name = tensor("op_658_cast")]; + tensor var_659_perm_0 = const()[name = tensor("op_659_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140171072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140695424))), name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140695552)))]; + tensor linear_38_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_37_cast)[name = tensor("linear_38_cast")]; + tensor var_664 = const()[name = tensor("op_664"), val = tensor([1, -1, 16, 64])]; + tensor var_665_cast = reshape(shape = var_664, x = linear_38_cast)[name = tensor("op_665_cast")]; + tensor var_666_perm_0 = const()[name = tensor("op_666_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_673 = const()[name = tensor("op_673"), val = tensor([1, 77, 16, 64])]; + tensor var_674_cast = reshape(shape = var_673, x = tensor_41_cast)[name = tensor("op_674_cast")]; + tensor var_675_perm_0 = const()[name = tensor("op_675_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_677 = const()[name = tensor("op_677"), val = tensor([16, -1, 64])]; + tensor transpose_84 = transpose(perm = var_675_perm_0, x = var_674_cast)[name = tensor("transpose_84")]; + tensor query_states_13_cast = reshape(shape = var_677, x = transpose_84)[name = tensor("query_states_13_cast")]; + tensor var_679 = const()[name = tensor("op_679"), val = tensor([16, -1, 64])]; + tensor transpose_83 = transpose(perm = var_659_perm_0, x = var_658_cast)[name = tensor("transpose_83")]; + tensor key_states_27_cast = reshape(shape = var_679, x = transpose_83)[name = tensor("key_states_27_cast")]; + tensor var_681 = const()[name = tensor("op_681"), val = tensor([16, -1, 64])]; + tensor transpose_82 = transpose(perm = var_666_perm_0, x = var_665_cast)[name = tensor("transpose_82")]; + tensor value_states_27_cast = reshape(shape = var_681, x = transpose_82)[name = tensor("value_states_27_cast")]; + tensor var_684_perm_0 = const()[name = tensor("op_684_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; + tensor transpose_81 = transpose(perm = var_684_perm_0, x = key_states_27_cast)[name = tensor("transpose_81")]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = query_states_13_cast, y = transpose_81)[name = tensor("attn_weights_37_cast")]; + tensor var_686 = const()[name = tensor("op_686"), val = tensor([1, 16, 77, 77])]; + tensor 
var_687_cast = reshape(shape = var_686, x = attn_weights_37_cast)[name = tensor("op_687_cast")]; + tensor attn_weights_39_cast = add(x = var_687_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_39_cast")]; + tensor var_692 = const()[name = tensor("op_692"), val = tensor([16, 77, 77])]; + tensor input_101_cast = reshape(shape = var_692, x = attn_weights_39_cast)[name = tensor("input_101_cast")]; + tensor input_103_cast = softmax(axis = var_5, x = input_101_cast)[name = tensor("input_103_cast")]; + tensor attn_output_37_transpose_x_0 = const()[name = tensor("attn_output_37_transpose_x_0"), val = tensor(false)]; + tensor attn_output_37_transpose_y_0 = const()[name = tensor("attn_output_37_transpose_y_0"), val = tensor(false)]; + tensor attn_output_37_cast = matmul(transpose_x = attn_output_37_transpose_x_0, transpose_y = attn_output_37_transpose_y_0, x = input_103_cast, y = value_states_27_cast)[name = tensor("attn_output_37_cast")]; + tensor var_697 = const()[name = tensor("op_697"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_39_cast = reshape(shape = var_697, x = attn_output_37_cast)[name = tensor("attn_output_39_cast")]; + tensor attn_output_41_perm_0 = const()[name = tensor("attn_output_41_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_700 = const()[name = tensor("op_700"), val = tensor([1, 77, 1024])]; + tensor transpose_80 = transpose(perm = attn_output_41_perm_0, x = attn_output_39_cast)[name = tensor("transpose_80")]; + tensor input_105_cast = reshape(shape = var_700, x = transpose_80)[name = tensor("input_105_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140697664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141222016))), name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141222144)))]; + tensor linear_39_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16_palettized, x = input_105_cast)[name = tensor("linear_39_cast")]; + tensor input_107_cast = add(x = input_99_cast, y = linear_39_cast)[name = tensor("input_107_cast")]; + tensor input_109_axes_0 = const()[name = tensor("input_109_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141224256)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141226368)))]; + tensor input_109_cast = layer_norm(axes = input_109_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = 
text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16, x = input_107_cast)[name = tensor("input_109_cast")]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141228480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143325696))), name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143325824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143327936))), name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_40_cast = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16_palettized, x = input_109_cast)[name = tensor("linear_40_cast")]; + tensor input_113_mode_0 = const()[name = tensor("input_113_mode_0"), val = tensor("EXACT")]; + tensor input_113_cast = gelu(mode = input_113_mode_0, x = linear_40_cast)[name = tensor("input_113_cast")]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143328064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145425280))), name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145425408)))]; + tensor linear_41_cast = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16_palettized, x = input_113_cast)[name = tensor("linear_41_cast")]; + tensor input_115_cast = add(x = input_107_cast, y = linear_41_cast)[name = tensor("input_115_cast")]; + tensor hidden_states_43_axes_0 = const()[name = tensor("hidden_states_43_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145427520)))]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145429632)))]; + tensor hidden_states_43_cast = layer_norm(axes = hidden_states_43_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16, x = input_115_cast)[name = tensor("hidden_states_43_cast")]; + tensor 
text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145431744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145956096))), name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145956224)))]; + tensor linear_42_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_43_cast)[name = tensor("linear_42_cast")]; + tensor var_739_to_fp16 = const()[name = tensor("op_739_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_47_cast = mul(x = linear_42_cast, y = var_739_to_fp16)[name = tensor("tensor_47_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145958336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146482688))), name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146482816)))]; + tensor linear_43_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_43_cast)[name = tensor("linear_43_cast")]; + tensor var_744 = const()[name = tensor("op_744"), val = tensor([1, -1, 16, 64])]; + tensor var_745_cast = reshape(shape = var_744, x = linear_43_cast)[name = tensor("op_745_cast")]; + tensor var_746_perm_0 = const()[name = tensor("op_746_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146484928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147009280))), name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147009408)))]; + tensor linear_44_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_43_cast)[name = tensor("linear_44_cast")]; + tensor var_751 = const()[name = tensor("op_751"), val = 
tensor([1, -1, 16, 64])]; + tensor var_752_cast = reshape(shape = var_751, x = linear_44_cast)[name = tensor("op_752_cast")]; + tensor var_753_perm_0 = const()[name = tensor("op_753_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_760 = const()[name = tensor("op_760"), val = tensor([1, 77, 16, 64])]; + tensor var_761_cast = reshape(shape = var_760, x = tensor_47_cast)[name = tensor("op_761_cast")]; + tensor var_762_perm_0 = const()[name = tensor("op_762_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_764 = const()[name = tensor("op_764"), val = tensor([16, -1, 64])]; + tensor transpose_79 = transpose(perm = var_762_perm_0, x = var_761_cast)[name = tensor("transpose_79")]; + tensor query_states_15_cast = reshape(shape = var_764, x = transpose_79)[name = tensor("query_states_15_cast")]; + tensor var_766 = const()[name = tensor("op_766"), val = tensor([16, -1, 64])]; + tensor transpose_78 = transpose(perm = var_746_perm_0, x = var_745_cast)[name = tensor("transpose_78")]; + tensor key_states_31_cast = reshape(shape = var_766, x = transpose_78)[name = tensor("key_states_31_cast")]; + tensor var_768 = const()[name = tensor("op_768"), val = tensor([16, -1, 64])]; + tensor transpose_77 = transpose(perm = var_753_perm_0, x = var_752_cast)[name = tensor("transpose_77")]; + tensor value_states_31_cast = reshape(shape = var_768, x = transpose_77)[name = tensor("value_states_31_cast")]; + tensor var_771_perm_0 = const()[name = tensor("op_771_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_43_transpose_x_0 = const()[name = tensor("attn_weights_43_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_43_transpose_y_0 = const()[name = tensor("attn_weights_43_transpose_y_0"), val = tensor(false)]; + tensor transpose_76 = transpose(perm = var_771_perm_0, x = key_states_31_cast)[name = tensor("transpose_76")]; + tensor attn_weights_43_cast = matmul(transpose_x = attn_weights_43_transpose_x_0, transpose_y = attn_weights_43_transpose_y_0, x = query_states_15_cast, y = transpose_76)[name = tensor("attn_weights_43_cast")]; + tensor var_773 = const()[name = tensor("op_773"), val = tensor([1, 16, 77, 77])]; + tensor var_774_cast = reshape(shape = var_773, x = attn_weights_43_cast)[name = tensor("op_774_cast")]; + tensor attn_weights_45_cast = add(x = var_774_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_45_cast")]; + tensor var_779 = const()[name = tensor("op_779"), val = tensor([16, 77, 77])]; + tensor input_117_cast = reshape(shape = var_779, x = attn_weights_45_cast)[name = tensor("input_117_cast")]; + tensor input_119_cast = softmax(axis = var_5, x = input_117_cast)[name = tensor("input_119_cast")]; + tensor attn_output_43_transpose_x_0 = const()[name = tensor("attn_output_43_transpose_x_0"), val = tensor(false)]; + tensor attn_output_43_transpose_y_0 = const()[name = tensor("attn_output_43_transpose_y_0"), val = tensor(false)]; + tensor attn_output_43_cast = matmul(transpose_x = attn_output_43_transpose_x_0, transpose_y = attn_output_43_transpose_y_0, x = input_119_cast, y = value_states_31_cast)[name = tensor("attn_output_43_cast")]; + tensor var_784 = const()[name = tensor("op_784"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_45_cast = reshape(shape = var_784, x = attn_output_43_cast)[name = tensor("attn_output_45_cast")]; + tensor attn_output_47_perm_0 = const()[name = tensor("attn_output_47_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_787 = const()[name = tensor("op_787"), val = tensor([1, 77, 1024])]; + tensor transpose_75 = transpose(perm = 
attn_output_47_perm_0, x = attn_output_45_cast)[name = tensor("transpose_75")]; + tensor input_121_cast = reshape(shape = var_787, x = transpose_75)[name = tensor("input_121_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147011520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147535872))), name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147536000)))]; + tensor linear_45_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16_palettized, x = input_121_cast)[name = tensor("linear_45_cast")]; + tensor input_123_cast = add(x = input_115_cast, y = linear_45_cast)[name = tensor("input_123_cast")]; + tensor input_125_axes_0 = const()[name = tensor("input_125_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147538112)))]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147540224)))]; + tensor input_125_cast = layer_norm(axes = input_125_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16, x = input_123_cast)[name = tensor("input_125_cast")]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147542336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149639552))), name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149639680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149641792))), name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_46_cast = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16_palettized, x = input_125_cast)[name = tensor("linear_46_cast")]; + tensor input_129_mode_0 = const()[name = tensor("input_129_mode_0"), val = tensor("EXACT")]; + tensor input_129_cast = gelu(mode = input_129_mode_0, x = linear_46_cast)[name = 
tensor("input_129_cast")]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149641920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151739136))), name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151739264)))]; + tensor linear_47_cast = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16_palettized, x = input_129_cast)[name = tensor("linear_47_cast")]; + tensor input_131_cast = add(x = input_123_cast, y = linear_47_cast)[name = tensor("input_131_cast")]; + tensor hidden_states_49_axes_0 = const()[name = tensor("hidden_states_49_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151741376)))]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151743488)))]; + tensor hidden_states_49_cast = layer_norm(axes = hidden_states_49_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16, x = input_131_cast)[name = tensor("hidden_states_49_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151745600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152269952))), name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152270080)))]; + tensor linear_48_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_49_cast)[name = tensor("linear_48_cast")]; + tensor var_826_to_fp16 = const()[name = tensor("op_826_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_53_cast = mul(x = linear_48_cast, y = var_826_to_fp16)[name = tensor("tensor_53_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152272192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(152796544))), name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152796672)))]; + tensor linear_49_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_49_cast)[name = tensor("linear_49_cast")]; + tensor var_831 = const()[name = tensor("op_831"), val = tensor([1, -1, 16, 64])]; + tensor var_832_cast = reshape(shape = var_831, x = linear_49_cast)[name = tensor("op_832_cast")]; + tensor var_833_perm_0 = const()[name = tensor("op_833_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152798784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153323136))), name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153323264)))]; + tensor linear_50_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_49_cast)[name = tensor("linear_50_cast")]; + tensor var_838 = const()[name = tensor("op_838"), val = tensor([1, -1, 16, 64])]; + tensor var_839_cast = reshape(shape = var_838, x = linear_50_cast)[name = tensor("op_839_cast")]; + tensor var_840_perm_0 = const()[name = tensor("op_840_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_847 = const()[name = tensor("op_847"), val = tensor([1, 77, 16, 64])]; + tensor var_848_cast = reshape(shape = var_847, x = tensor_53_cast)[name = tensor("op_848_cast")]; + tensor var_849_perm_0 = const()[name = tensor("op_849_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_851 = const()[name = tensor("op_851"), val = tensor([16, -1, 64])]; + tensor transpose_74 = transpose(perm = var_849_perm_0, x = var_848_cast)[name = tensor("transpose_74")]; + tensor query_states_17_cast = reshape(shape = var_851, x = transpose_74)[name = tensor("query_states_17_cast")]; + tensor var_853 = const()[name = tensor("op_853"), val = tensor([16, -1, 64])]; + tensor transpose_73 = transpose(perm = var_833_perm_0, x = var_832_cast)[name = tensor("transpose_73")]; + tensor key_states_35_cast = reshape(shape = var_853, x = transpose_73)[name = tensor("key_states_35_cast")]; + tensor var_855 = const()[name = tensor("op_855"), val = tensor([16, -1, 64])]; + tensor transpose_72 = transpose(perm = var_840_perm_0, x = var_839_cast)[name = tensor("transpose_72")]; + tensor value_states_35_cast = reshape(shape = var_855, x = transpose_72)[name = tensor("value_states_35_cast")]; + tensor var_858_perm_0 = const()[name = tensor("op_858_perm_0"), val = tensor([0, 2, 1])]; + tensor 
attn_weights_49_transpose_x_0 = const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_49_transpose_y_0 = const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; + tensor transpose_71 = transpose(perm = var_858_perm_0, x = key_states_35_cast)[name = tensor("transpose_71")]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = query_states_17_cast, y = transpose_71)[name = tensor("attn_weights_49_cast")]; + tensor var_860 = const()[name = tensor("op_860"), val = tensor([1, 16, 77, 77])]; + tensor var_861_cast = reshape(shape = var_860, x = attn_weights_49_cast)[name = tensor("op_861_cast")]; + tensor attn_weights_51_cast = add(x = var_861_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_51_cast")]; + tensor var_866 = const()[name = tensor("op_866"), val = tensor([16, 77, 77])]; + tensor input_133_cast = reshape(shape = var_866, x = attn_weights_51_cast)[name = tensor("input_133_cast")]; + tensor input_135_cast = softmax(axis = var_5, x = input_133_cast)[name = tensor("input_135_cast")]; + tensor attn_output_49_transpose_x_0 = const()[name = tensor("attn_output_49_transpose_x_0"), val = tensor(false)]; + tensor attn_output_49_transpose_y_0 = const()[name = tensor("attn_output_49_transpose_y_0"), val = tensor(false)]; + tensor attn_output_49_cast = matmul(transpose_x = attn_output_49_transpose_x_0, transpose_y = attn_output_49_transpose_y_0, x = input_135_cast, y = value_states_35_cast)[name = tensor("attn_output_49_cast")]; + tensor var_871 = const()[name = tensor("op_871"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_51_cast = reshape(shape = var_871, x = attn_output_49_cast)[name = tensor("attn_output_51_cast")]; + tensor attn_output_53_perm_0 = const()[name = tensor("attn_output_53_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_874 = const()[name = tensor("op_874"), val = tensor([1, 77, 1024])]; + tensor transpose_70 = transpose(perm = attn_output_53_perm_0, x = attn_output_51_cast)[name = tensor("transpose_70")]; + tensor input_137_cast = reshape(shape = var_874, x = transpose_70)[name = tensor("input_137_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153325376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153849728))), name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153849856)))]; + tensor linear_51_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16_palettized, x = input_137_cast)[name = tensor("linear_51_cast")]; + tensor input_139_cast = add(x = input_131_cast, y = linear_51_cast)[name = tensor("input_139_cast")]; + tensor input_141_axes_0 = const()[name = tensor("input_141_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153851968)))]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153854080)))]; + tensor input_141_cast = layer_norm(axes = input_141_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16, x = input_139_cast)[name = tensor("input_141_cast")]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(153856192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155953408))), name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155953536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155955648))), name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_52_cast = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16_palettized, x = input_141_cast)[name = tensor("linear_52_cast")]; + tensor input_145_mode_0 = const()[name = tensor("input_145_mode_0"), val = tensor("EXACT")]; + tensor input_145_cast = gelu(mode = input_145_mode_0, x = linear_52_cast)[name = tensor("input_145_cast")]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155955776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158052992))), name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158053120)))]; + tensor linear_53_cast = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16_palettized, x = input_145_cast)[name = tensor("linear_53_cast")]; + tensor input_147_cast = add(x = input_139_cast, y = linear_53_cast)[name = tensor("input_147_cast")]; + tensor hidden_states_55_axes_0 = const()[name = tensor("hidden_states_55_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158055232)))]; + tensor 
text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158057344)))]; + tensor hidden_states_55_cast = layer_norm(axes = hidden_states_55_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16, x = input_147_cast)[name = tensor("hidden_states_55_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158059456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158583808))), name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158583936)))]; + tensor linear_54_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_55_cast)[name = tensor("linear_54_cast")]; + tensor var_913_to_fp16 = const()[name = tensor("op_913_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_59_cast = mul(x = linear_54_cast, y = var_913_to_fp16)[name = tensor("tensor_59_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158586048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159110400))), name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159110528)))]; + tensor linear_55_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_55_cast)[name = tensor("linear_55_cast")]; + tensor var_918 = const()[name = tensor("op_918"), val = tensor([1, -1, 16, 64])]; + tensor var_919_cast = reshape(shape = var_918, x = linear_55_cast)[name = tensor("op_919_cast")]; + tensor var_920_perm_0 = const()[name = tensor("op_920_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159112640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159636992))), name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor 
text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159637120)))]; + tensor linear_56_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_55_cast)[name = tensor("linear_56_cast")]; + tensor var_925 = const()[name = tensor("op_925"), val = tensor([1, -1, 16, 64])]; + tensor var_926_cast = reshape(shape = var_925, x = linear_56_cast)[name = tensor("op_926_cast")]; + tensor var_927_perm_0 = const()[name = tensor("op_927_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_934 = const()[name = tensor("op_934"), val = tensor([1, 77, 16, 64])]; + tensor var_935_cast = reshape(shape = var_934, x = tensor_59_cast)[name = tensor("op_935_cast")]; + tensor var_936_perm_0 = const()[name = tensor("op_936_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_938 = const()[name = tensor("op_938"), val = tensor([16, -1, 64])]; + tensor transpose_69 = transpose(perm = var_936_perm_0, x = var_935_cast)[name = tensor("transpose_69")]; + tensor query_states_19_cast = reshape(shape = var_938, x = transpose_69)[name = tensor("query_states_19_cast")]; + tensor var_940 = const()[name = tensor("op_940"), val = tensor([16, -1, 64])]; + tensor transpose_68 = transpose(perm = var_920_perm_0, x = var_919_cast)[name = tensor("transpose_68")]; + tensor key_states_39_cast = reshape(shape = var_940, x = transpose_68)[name = tensor("key_states_39_cast")]; + tensor var_942 = const()[name = tensor("op_942"), val = tensor([16, -1, 64])]; + tensor transpose_67 = transpose(perm = var_927_perm_0, x = var_926_cast)[name = tensor("transpose_67")]; + tensor value_states_39_cast = reshape(shape = var_942, x = transpose_67)[name = tensor("value_states_39_cast")]; + tensor var_945_perm_0 = const()[name = tensor("op_945_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_55_transpose_x_0 = const()[name = tensor("attn_weights_55_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_55_transpose_y_0 = const()[name = tensor("attn_weights_55_transpose_y_0"), val = tensor(false)]; + tensor transpose_66 = transpose(perm = var_945_perm_0, x = key_states_39_cast)[name = tensor("transpose_66")]; + tensor attn_weights_55_cast = matmul(transpose_x = attn_weights_55_transpose_x_0, transpose_y = attn_weights_55_transpose_y_0, x = query_states_19_cast, y = transpose_66)[name = tensor("attn_weights_55_cast")]; + tensor var_947 = const()[name = tensor("op_947"), val = tensor([1, 16, 77, 77])]; + tensor var_948_cast = reshape(shape = var_947, x = attn_weights_55_cast)[name = tensor("op_948_cast")]; + tensor attn_weights_57_cast = add(x = var_948_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_57_cast")]; + tensor var_953 = const()[name = tensor("op_953"), val = tensor([16, 77, 77])]; + tensor input_149_cast = reshape(shape = var_953, x = attn_weights_57_cast)[name = tensor("input_149_cast")]; + tensor input_151_cast = softmax(axis = var_5, x = input_149_cast)[name = tensor("input_151_cast")]; + tensor attn_output_55_transpose_x_0 = const()[name = tensor("attn_output_55_transpose_x_0"), val = tensor(false)]; + tensor attn_output_55_transpose_y_0 = const()[name = tensor("attn_output_55_transpose_y_0"), val = tensor(false)]; + tensor attn_output_55_cast = matmul(transpose_x = 
attn_output_55_transpose_x_0, transpose_y = attn_output_55_transpose_y_0, x = input_151_cast, y = value_states_39_cast)[name = tensor("attn_output_55_cast")]; + tensor var_958 = const()[name = tensor("op_958"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_57_cast = reshape(shape = var_958, x = attn_output_55_cast)[name = tensor("attn_output_57_cast")]; + tensor attn_output_59_perm_0 = const()[name = tensor("attn_output_59_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_961 = const()[name = tensor("op_961"), val = tensor([1, 77, 1024])]; + tensor transpose_65 = transpose(perm = attn_output_59_perm_0, x = attn_output_57_cast)[name = tensor("transpose_65")]; + tensor input_153_cast = reshape(shape = var_961, x = transpose_65)[name = tensor("input_153_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159639232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160163584))), name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160163712)))]; + tensor linear_57_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16_palettized, x = input_153_cast)[name = tensor("linear_57_cast")]; + tensor input_155_cast = add(x = input_147_cast, y = linear_57_cast)[name = tensor("input_155_cast")]; + tensor input_157_axes_0 = const()[name = tensor("input_157_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160165824)))]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160167936)))]; + tensor input_157_cast = layer_norm(axes = input_157_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16, x = input_155_cast)[name = tensor("input_157_cast")]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160170048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162267264))), name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162267392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(162269504))), name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_58_cast = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16_palettized, x = input_157_cast)[name = tensor("linear_58_cast")]; + tensor input_161_mode_0 = const()[name = tensor("input_161_mode_0"), val = tensor("EXACT")]; + tensor input_161_cast = gelu(mode = input_161_mode_0, x = linear_58_cast)[name = tensor("input_161_cast")]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162269632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164366848))), name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164366976)))]; + tensor linear_59_cast = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16_palettized, x = input_161_cast)[name = tensor("linear_59_cast")]; + tensor input_163_cast = add(x = input_155_cast, y = linear_59_cast)[name = tensor("input_163_cast")]; + tensor hidden_states_61_axes_0 = const()[name = tensor("hidden_states_61_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164369088)))]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164371200)))]; + tensor hidden_states_61_cast = layer_norm(axes = hidden_states_61_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16, x = input_163_cast)[name = tensor("hidden_states_61_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164373312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164897664))), name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164897792)))]; + tensor linear_60_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_61_cast)[name = tensor("linear_60_cast")]; + tensor var_1000_to_fp16 = const()[name = tensor("op_1000_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_65_cast = mul(x = linear_60_cast, y = var_1000_to_fp16)[name = tensor("tensor_65_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164899904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165424256))), name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165424384)))]; + tensor linear_61_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_61_cast)[name = tensor("linear_61_cast")]; + tensor var_1005 = const()[name = tensor("op_1005"), val = tensor([1, -1, 16, 64])]; + tensor var_1006_cast = reshape(shape = var_1005, x = linear_61_cast)[name = tensor("op_1006_cast")]; + tensor var_1007_perm_0 = const()[name = tensor("op_1007_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165426496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165950848))), name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165950976)))]; + tensor linear_62_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_61_cast)[name = tensor("linear_62_cast")]; + tensor var_1012 = const()[name = tensor("op_1012"), val = tensor([1, -1, 16, 64])]; + tensor var_1013_cast = reshape(shape = var_1012, x = linear_62_cast)[name = tensor("op_1013_cast")]; + tensor var_1014_perm_0 = const()[name = tensor("op_1014_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1021 = const()[name = tensor("op_1021"), val = tensor([1, 77, 16, 64])]; + tensor var_1022_cast = reshape(shape = var_1021, x = tensor_65_cast)[name = tensor("op_1022_cast")]; + tensor var_1023_perm_0 = const()[name = tensor("op_1023_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1025 = const()[name = tensor("op_1025"), val = tensor([16, -1, 64])]; + tensor transpose_64 = transpose(perm = var_1023_perm_0, x = var_1022_cast)[name = tensor("transpose_64")]; + tensor query_states_21_cast = reshape(shape = var_1025, x = transpose_64)[name = tensor("query_states_21_cast")]; + tensor 
var_1027 = const()[name = tensor("op_1027"), val = tensor([16, -1, 64])]; + tensor transpose_63 = transpose(perm = var_1007_perm_0, x = var_1006_cast)[name = tensor("transpose_63")]; + tensor key_states_43_cast = reshape(shape = var_1027, x = transpose_63)[name = tensor("key_states_43_cast")]; + tensor var_1029 = const()[name = tensor("op_1029"), val = tensor([16, -1, 64])]; + tensor transpose_62 = transpose(perm = var_1014_perm_0, x = var_1013_cast)[name = tensor("transpose_62")]; + tensor value_states_43_cast = reshape(shape = var_1029, x = transpose_62)[name = tensor("value_states_43_cast")]; + tensor var_1032_perm_0 = const()[name = tensor("op_1032_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor transpose_61 = transpose(perm = var_1032_perm_0, x = key_states_43_cast)[name = tensor("transpose_61")]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = query_states_21_cast, y = transpose_61)[name = tensor("attn_weights_61_cast")]; + tensor var_1034 = const()[name = tensor("op_1034"), val = tensor([1, 16, 77, 77])]; + tensor var_1035_cast = reshape(shape = var_1034, x = attn_weights_61_cast)[name = tensor("op_1035_cast")]; + tensor attn_weights_63_cast = add(x = var_1035_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_63_cast")]; + tensor var_1040 = const()[name = tensor("op_1040"), val = tensor([16, 77, 77])]; + tensor input_165_cast = reshape(shape = var_1040, x = attn_weights_63_cast)[name = tensor("input_165_cast")]; + tensor input_167_cast = softmax(axis = var_5, x = input_165_cast)[name = tensor("input_167_cast")]; + tensor attn_output_61_transpose_x_0 = const()[name = tensor("attn_output_61_transpose_x_0"), val = tensor(false)]; + tensor attn_output_61_transpose_y_0 = const()[name = tensor("attn_output_61_transpose_y_0"), val = tensor(false)]; + tensor attn_output_61_cast = matmul(transpose_x = attn_output_61_transpose_x_0, transpose_y = attn_output_61_transpose_y_0, x = input_167_cast, y = value_states_43_cast)[name = tensor("attn_output_61_cast")]; + tensor var_1045 = const()[name = tensor("op_1045"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_63_cast = reshape(shape = var_1045, x = attn_output_61_cast)[name = tensor("attn_output_63_cast")]; + tensor attn_output_65_perm_0 = const()[name = tensor("attn_output_65_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1048 = const()[name = tensor("op_1048"), val = tensor([1, 77, 1024])]; + tensor transpose_60 = transpose(perm = attn_output_65_perm_0, x = attn_output_63_cast)[name = tensor("transpose_60")]; + tensor input_169_cast = reshape(shape = var_1048, x = transpose_60)[name = tensor("input_169_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165953088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166477440))), name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166477568)))]; + tensor linear_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16_palettized, x = input_169_cast)[name = tensor("linear_63_cast")]; + tensor input_171_cast = add(x = input_163_cast, y = linear_63_cast)[name = tensor("input_171_cast")]; + tensor input_173_axes_0 = const()[name = tensor("input_173_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166479680)))]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166481792)))]; + tensor input_173_cast = layer_norm(axes = input_173_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16, x = input_171_cast)[name = tensor("input_173_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166483904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(168581120))), name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(168581248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(168583360))), name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_64_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16_palettized, x = input_173_cast)[name = tensor("linear_64_cast")]; + tensor input_177_mode_0 = const()[name = tensor("input_177_mode_0"), val = tensor("EXACT")]; + tensor input_177_cast = gelu(mode = input_177_mode_0, x = linear_64_cast)[name = tensor("input_177_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(168583488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170680704))), name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170680832)))]; + tensor 
linear_65_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16_palettized, x = input_177_cast)[name = tensor("linear_65_cast")]; + tensor input_179_cast = add(x = input_171_cast, y = linear_65_cast)[name = tensor("input_179_cast")]; + tensor hidden_states_67_axes_0 = const()[name = tensor("hidden_states_67_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170682944)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170685056)))]; + tensor hidden_states_67_cast = layer_norm(axes = hidden_states_67_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16, x = input_179_cast)[name = tensor("hidden_states_67_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170687168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171211520))), name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171211648)))]; + tensor linear_66_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_67_cast)[name = tensor("linear_66_cast")]; + tensor var_1087_to_fp16 = const()[name = tensor("op_1087_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_71_cast = mul(x = linear_66_cast, y = var_1087_to_fp16)[name = tensor("tensor_71_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171213760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171738112))), name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171738240)))]; + tensor linear_67_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_67_cast)[name = tensor("linear_67_cast")]; + tensor var_1092 
= const()[name = tensor("op_1092"), val = tensor([1, -1, 16, 64])]; + tensor var_1093_cast = reshape(shape = var_1092, x = linear_67_cast)[name = tensor("op_1093_cast")]; + tensor var_1094_perm_0 = const()[name = tensor("op_1094_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171740352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172264704))), name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172264832)))]; + tensor linear_68_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_67_cast)[name = tensor("linear_68_cast")]; + tensor var_1099 = const()[name = tensor("op_1099"), val = tensor([1, -1, 16, 64])]; + tensor var_1100_cast = reshape(shape = var_1099, x = linear_68_cast)[name = tensor("op_1100_cast")]; + tensor var_1101_perm_0 = const()[name = tensor("op_1101_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1108 = const()[name = tensor("op_1108"), val = tensor([1, 77, 16, 64])]; + tensor var_1109_cast = reshape(shape = var_1108, x = tensor_71_cast)[name = tensor("op_1109_cast")]; + tensor var_1110_perm_0 = const()[name = tensor("op_1110_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1112 = const()[name = tensor("op_1112"), val = tensor([16, -1, 64])]; + tensor transpose_59 = transpose(perm = var_1110_perm_0, x = var_1109_cast)[name = tensor("transpose_59")]; + tensor query_states_23_cast = reshape(shape = var_1112, x = transpose_59)[name = tensor("query_states_23_cast")]; + tensor var_1114 = const()[name = tensor("op_1114"), val = tensor([16, -1, 64])]; + tensor transpose_58 = transpose(perm = var_1094_perm_0, x = var_1093_cast)[name = tensor("transpose_58")]; + tensor key_states_47_cast = reshape(shape = var_1114, x = transpose_58)[name = tensor("key_states_47_cast")]; + tensor var_1116 = const()[name = tensor("op_1116"), val = tensor([16, -1, 64])]; + tensor transpose_57 = transpose(perm = var_1101_perm_0, x = var_1100_cast)[name = tensor("transpose_57")]; + tensor value_states_47_cast = reshape(shape = var_1116, x = transpose_57)[name = tensor("value_states_47_cast")]; + tensor var_1119_perm_0 = const()[name = tensor("op_1119_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_67_transpose_x_0 = const()[name = tensor("attn_weights_67_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_67_transpose_y_0 = const()[name = tensor("attn_weights_67_transpose_y_0"), val = tensor(false)]; + tensor transpose_56 = transpose(perm = var_1119_perm_0, x = key_states_47_cast)[name = tensor("transpose_56")]; + tensor attn_weights_67_cast = matmul(transpose_x = attn_weights_67_transpose_x_0, transpose_y = attn_weights_67_transpose_y_0, x = query_states_23_cast, y = transpose_56)[name = tensor("attn_weights_67_cast")]; + tensor var_1121 = const()[name = tensor("op_1121"), val = tensor([1, 16, 77, 77])]; + tensor var_1122_cast = 
reshape(shape = var_1121, x = attn_weights_67_cast)[name = tensor("op_1122_cast")]; + tensor attn_weights_69_cast = add(x = var_1122_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_69_cast")]; + tensor var_1127 = const()[name = tensor("op_1127"), val = tensor([16, 77, 77])]; + tensor input_181_cast = reshape(shape = var_1127, x = attn_weights_69_cast)[name = tensor("input_181_cast")]; + tensor input_183_cast = softmax(axis = var_5, x = input_181_cast)[name = tensor("input_183_cast")]; + tensor attn_output_67_transpose_x_0 = const()[name = tensor("attn_output_67_transpose_x_0"), val = tensor(false)]; + tensor attn_output_67_transpose_y_0 = const()[name = tensor("attn_output_67_transpose_y_0"), val = tensor(false)]; + tensor attn_output_67_cast = matmul(transpose_x = attn_output_67_transpose_x_0, transpose_y = attn_output_67_transpose_y_0, x = input_183_cast, y = value_states_47_cast)[name = tensor("attn_output_67_cast")]; + tensor var_1132 = const()[name = tensor("op_1132"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_69_cast = reshape(shape = var_1132, x = attn_output_67_cast)[name = tensor("attn_output_69_cast")]; + tensor attn_output_71_perm_0 = const()[name = tensor("attn_output_71_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1135 = const()[name = tensor("op_1135"), val = tensor([1, 77, 1024])]; + tensor transpose_55 = transpose(perm = attn_output_71_perm_0, x = attn_output_69_cast)[name = tensor("transpose_55")]; + tensor input_185_cast = reshape(shape = var_1135, x = transpose_55)[name = tensor("input_185_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172266944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172791296))), name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172791424)))]; + tensor linear_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16_palettized, x = input_185_cast)[name = tensor("linear_69_cast")]; + tensor input_187_cast = add(x = input_179_cast, y = linear_69_cast)[name = tensor("input_187_cast")]; + tensor input_189_axes_0 = const()[name = tensor("input_189_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172793536)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172795648)))]; + tensor input_189_cast = layer_norm(axes = input_189_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = 
text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16, x = input_187_cast)[name = tensor("input_189_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172797760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174894976))), name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174895104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174897216))), name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_70_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16_palettized, x = input_189_cast)[name = tensor("linear_70_cast")]; + tensor input_193_mode_0 = const()[name = tensor("input_193_mode_0"), val = tensor("EXACT")]; + tensor input_193_cast = gelu(mode = input_193_mode_0, x = linear_70_cast)[name = tensor("input_193_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174897344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(176994560))), name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(176994688)))]; + tensor linear_71_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16_palettized, x = input_193_cast)[name = tensor("linear_71_cast")]; + tensor input_195_cast = add(x = input_187_cast, y = linear_71_cast)[name = tensor("input_195_cast")]; + tensor hidden_states_73_axes_0 = const()[name = tensor("hidden_states_73_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(176996800)))]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(176998912)))]; + tensor hidden_states_73_cast = layer_norm(axes = hidden_states_73_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16, x = input_195_cast)[name = tensor("hidden_states_73_cast")]; + tensor 
text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(177001024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(177525376))), name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(177525504)))]; + tensor linear_72_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_73_cast)[name = tensor("linear_72_cast")]; + tensor var_1174_to_fp16 = const()[name = tensor("op_1174_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_77_cast = mul(x = linear_72_cast, y = var_1174_to_fp16)[name = tensor("tensor_77_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(177527616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178051968))), name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178052096)))]; + tensor linear_73_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_73_cast)[name = tensor("linear_73_cast")]; + tensor var_1179 = const()[name = tensor("op_1179"), val = tensor([1, -1, 16, 64])]; + tensor var_1180_cast = reshape(shape = var_1179, x = linear_73_cast)[name = tensor("op_1180_cast")]; + tensor var_1181_perm_0 = const()[name = tensor("op_1181_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178054208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178578560))), name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178578688)))]; + tensor linear_74_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_73_cast)[name = tensor("linear_74_cast")]; + tensor var_1186 = const()[name = 
tensor("op_1186"), val = tensor([1, -1, 16, 64])]; + tensor var_1187_cast = reshape(shape = var_1186, x = linear_74_cast)[name = tensor("op_1187_cast")]; + tensor var_1188_perm_0 = const()[name = tensor("op_1188_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1195 = const()[name = tensor("op_1195"), val = tensor([1, 77, 16, 64])]; + tensor var_1196_cast = reshape(shape = var_1195, x = tensor_77_cast)[name = tensor("op_1196_cast")]; + tensor var_1197_perm_0 = const()[name = tensor("op_1197_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1199 = const()[name = tensor("op_1199"), val = tensor([16, -1, 64])]; + tensor transpose_54 = transpose(perm = var_1197_perm_0, x = var_1196_cast)[name = tensor("transpose_54")]; + tensor query_states_25_cast = reshape(shape = var_1199, x = transpose_54)[name = tensor("query_states_25_cast")]; + tensor var_1201 = const()[name = tensor("op_1201"), val = tensor([16, -1, 64])]; + tensor transpose_53 = transpose(perm = var_1181_perm_0, x = var_1180_cast)[name = tensor("transpose_53")]; + tensor key_states_51_cast = reshape(shape = var_1201, x = transpose_53)[name = tensor("key_states_51_cast")]; + tensor var_1203 = const()[name = tensor("op_1203"), val = tensor([16, -1, 64])]; + tensor transpose_52 = transpose(perm = var_1188_perm_0, x = var_1187_cast)[name = tensor("transpose_52")]; + tensor value_states_51_cast = reshape(shape = var_1203, x = transpose_52)[name = tensor("value_states_51_cast")]; + tensor var_1206_perm_0 = const()[name = tensor("op_1206_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; + tensor transpose_51 = transpose(perm = var_1206_perm_0, x = key_states_51_cast)[name = tensor("transpose_51")]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = query_states_25_cast, y = transpose_51)[name = tensor("attn_weights_73_cast")]; + tensor var_1208 = const()[name = tensor("op_1208"), val = tensor([1, 16, 77, 77])]; + tensor var_1209_cast = reshape(shape = var_1208, x = attn_weights_73_cast)[name = tensor("op_1209_cast")]; + tensor attn_weights_75_cast = add(x = var_1209_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_75_cast")]; + tensor var_1214 = const()[name = tensor("op_1214"), val = tensor([16, 77, 77])]; + tensor input_197_cast = reshape(shape = var_1214, x = attn_weights_75_cast)[name = tensor("input_197_cast")]; + tensor input_199_cast = softmax(axis = var_5, x = input_197_cast)[name = tensor("input_199_cast")]; + tensor attn_output_73_transpose_x_0 = const()[name = tensor("attn_output_73_transpose_x_0"), val = tensor(false)]; + tensor attn_output_73_transpose_y_0 = const()[name = tensor("attn_output_73_transpose_y_0"), val = tensor(false)]; + tensor attn_output_73_cast = matmul(transpose_x = attn_output_73_transpose_x_0, transpose_y = attn_output_73_transpose_y_0, x = input_199_cast, y = value_states_51_cast)[name = tensor("attn_output_73_cast")]; + tensor var_1219 = const()[name = tensor("op_1219"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_75_cast = reshape(shape = var_1219, x = attn_output_73_cast)[name = tensor("attn_output_75_cast")]; + tensor attn_output_77_perm_0 = const()[name = tensor("attn_output_77_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1222 = const()[name = tensor("op_1222"), val = 
tensor([1, 77, 1024])]; + tensor transpose_50 = transpose(perm = attn_output_77_perm_0, x = attn_output_75_cast)[name = tensor("transpose_50")]; + tensor input_201_cast = reshape(shape = var_1222, x = transpose_50)[name = tensor("input_201_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178580800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179105152))), name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179105280)))]; + tensor linear_75_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16_palettized, x = input_201_cast)[name = tensor("linear_75_cast")]; + tensor input_203_cast = add(x = input_195_cast, y = linear_75_cast)[name = tensor("input_203_cast")]; + tensor input_205_axes_0 = const()[name = tensor("input_205_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179107392)))]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179109504)))]; + tensor input_205_cast = layer_norm(axes = input_205_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16, x = input_203_cast)[name = tensor("input_205_cast")]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179111616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181208832))), name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181208960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181211072))), name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_76_cast = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16_palettized, x = input_205_cast)[name = tensor("linear_76_cast")]; + tensor input_209_mode_0 = const()[name = tensor("input_209_mode_0"), val = tensor("EXACT")]; + tensor 
input_209_cast = gelu(mode = input_209_mode_0, x = linear_76_cast)[name = tensor("input_209_cast")]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181211200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183308416))), name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183308544)))]; + tensor linear_77_cast = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16_palettized, x = input_209_cast)[name = tensor("linear_77_cast")]; + tensor input_211_cast = add(x = input_203_cast, y = linear_77_cast)[name = tensor("input_211_cast")]; + tensor hidden_states_79_axes_0 = const()[name = tensor("hidden_states_79_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183310656)))]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183312768)))]; + tensor hidden_states_79_cast = layer_norm(axes = hidden_states_79_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16, x = input_211_cast)[name = tensor("hidden_states_79_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183314880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183839232))), name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183839360)))]; + tensor linear_78_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_79_cast)[name = tensor("linear_78_cast")]; + tensor var_1261_to_fp16 = const()[name = tensor("op_1261_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_83_cast = mul(x = linear_78_cast, y = var_1261_to_fp16)[name = tensor("tensor_83_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(183841472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184365824))), name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184365952)))]; + tensor linear_79_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_79_cast)[name = tensor("linear_79_cast")]; + tensor var_1266 = const()[name = tensor("op_1266"), val = tensor([1, -1, 16, 64])]; + tensor var_1267_cast = reshape(shape = var_1266, x = linear_79_cast)[name = tensor("op_1267_cast")]; + tensor var_1268_perm_0 = const()[name = tensor("op_1268_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184368064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184892416))), name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184892544)))]; + tensor linear_80_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_79_cast)[name = tensor("linear_80_cast")]; + tensor var_1273 = const()[name = tensor("op_1273"), val = tensor([1, -1, 16, 64])]; + tensor var_1274_cast = reshape(shape = var_1273, x = linear_80_cast)[name = tensor("op_1274_cast")]; + tensor var_1275_perm_0 = const()[name = tensor("op_1275_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1282 = const()[name = tensor("op_1282"), val = tensor([1, 77, 16, 64])]; + tensor var_1283_cast = reshape(shape = var_1282, x = tensor_83_cast)[name = tensor("op_1283_cast")]; + tensor var_1284_perm_0 = const()[name = tensor("op_1284_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1286 = const()[name = tensor("op_1286"), val = tensor([16, -1, 64])]; + tensor transpose_49 = transpose(perm = var_1284_perm_0, x = var_1283_cast)[name = tensor("transpose_49")]; + tensor query_states_27_cast = reshape(shape = var_1286, x = transpose_49)[name = tensor("query_states_27_cast")]; + tensor var_1288 = const()[name = tensor("op_1288"), val = tensor([16, -1, 64])]; + tensor transpose_48 = transpose(perm = var_1268_perm_0, x = var_1267_cast)[name = tensor("transpose_48")]; + tensor key_states_55_cast = reshape(shape = var_1288, x = transpose_48)[name = tensor("key_states_55_cast")]; + tensor var_1290 = const()[name = tensor("op_1290"), val = tensor([16, -1, 64])]; + tensor transpose_47 = transpose(perm = var_1275_perm_0, x = var_1274_cast)[name = tensor("transpose_47")]; + tensor value_states_55_cast = reshape(shape = var_1290, x = 
transpose_47)[name = tensor("value_states_55_cast")]; + tensor var_1293_perm_0 = const()[name = tensor("op_1293_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_79_transpose_x_0 = const()[name = tensor("attn_weights_79_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_79_transpose_y_0 = const()[name = tensor("attn_weights_79_transpose_y_0"), val = tensor(false)]; + tensor transpose_46 = transpose(perm = var_1293_perm_0, x = key_states_55_cast)[name = tensor("transpose_46")]; + tensor attn_weights_79_cast = matmul(transpose_x = attn_weights_79_transpose_x_0, transpose_y = attn_weights_79_transpose_y_0, x = query_states_27_cast, y = transpose_46)[name = tensor("attn_weights_79_cast")]; + tensor var_1295 = const()[name = tensor("op_1295"), val = tensor([1, 16, 77, 77])]; + tensor var_1296_cast = reshape(shape = var_1295, x = attn_weights_79_cast)[name = tensor("op_1296_cast")]; + tensor attn_weights_81_cast = add(x = var_1296_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_81_cast")]; + tensor var_1301 = const()[name = tensor("op_1301"), val = tensor([16, 77, 77])]; + tensor input_213_cast = reshape(shape = var_1301, x = attn_weights_81_cast)[name = tensor("input_213_cast")]; + tensor input_215_cast = softmax(axis = var_5, x = input_213_cast)[name = tensor("input_215_cast")]; + tensor attn_output_79_transpose_x_0 = const()[name = tensor("attn_output_79_transpose_x_0"), val = tensor(false)]; + tensor attn_output_79_transpose_y_0 = const()[name = tensor("attn_output_79_transpose_y_0"), val = tensor(false)]; + tensor attn_output_79_cast = matmul(transpose_x = attn_output_79_transpose_x_0, transpose_y = attn_output_79_transpose_y_0, x = input_215_cast, y = value_states_55_cast)[name = tensor("attn_output_79_cast")]; + tensor var_1306 = const()[name = tensor("op_1306"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_81_cast = reshape(shape = var_1306, x = attn_output_79_cast)[name = tensor("attn_output_81_cast")]; + tensor attn_output_83_perm_0 = const()[name = tensor("attn_output_83_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1309 = const()[name = tensor("op_1309"), val = tensor([1, 77, 1024])]; + tensor transpose_45 = transpose(perm = attn_output_83_perm_0, x = attn_output_81_cast)[name = tensor("transpose_45")]; + tensor input_217_cast = reshape(shape = var_1309, x = transpose_45)[name = tensor("input_217_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184894656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(185419008))), name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(185419136)))]; + tensor linear_81_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16_palettized, x = input_217_cast)[name = tensor("linear_81_cast")]; + tensor input_219_cast = add(x = input_211_cast, y = linear_81_cast)[name = tensor("input_219_cast")]; + tensor input_221_axes_0 = 
const()[name = tensor("input_221_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(185421248)))]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(185423360)))]; + tensor input_221_cast = layer_norm(axes = input_221_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16, x = input_219_cast)[name = tensor("input_221_cast")]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(185425472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187522688))), name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187522816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187524928))), name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_82_cast = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16_palettized, x = input_221_cast)[name = tensor("linear_82_cast")]; + tensor input_225_mode_0 = const()[name = tensor("input_225_mode_0"), val = tensor("EXACT")]; + tensor input_225_cast = gelu(mode = input_225_mode_0, x = linear_82_cast)[name = tensor("input_225_cast")]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187525056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189622272))), name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189622400)))]; + tensor linear_83_cast = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16_palettized, x = input_225_cast)[name = tensor("linear_83_cast")]; + tensor input_227_cast = add(x = input_219_cast, y = linear_83_cast)[name = tensor("input_227_cast")]; + tensor hidden_states_85_axes_0 = const()[name = tensor("hidden_states_85_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189624512)))]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189626624)))]; + tensor hidden_states_85_cast = layer_norm(axes = hidden_states_85_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16, x = input_227_cast)[name = tensor("hidden_states_85_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189628736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190153088))), name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190153216)))]; + tensor linear_84_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_85_cast)[name = tensor("linear_84_cast")]; + tensor var_1348_to_fp16 = const()[name = tensor("op_1348_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_89_cast = mul(x = linear_84_cast, y = var_1348_to_fp16)[name = tensor("tensor_89_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190155328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190679680))), name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190679808)))]; + tensor linear_85_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_85_cast)[name = tensor("linear_85_cast")]; + tensor var_1353 = const()[name = tensor("op_1353"), val = tensor([1, -1, 16, 64])]; + tensor var_1354_cast = reshape(shape = var_1353, x = linear_85_cast)[name = tensor("op_1354_cast")]; + tensor var_1355_perm_0 = const()[name = tensor("op_1355_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190681920))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(191206272))), name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191206400)))]; + tensor linear_86_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_85_cast)[name = tensor("linear_86_cast")]; + tensor var_1360 = const()[name = tensor("op_1360"), val = tensor([1, -1, 16, 64])]; + tensor var_1361_cast = reshape(shape = var_1360, x = linear_86_cast)[name = tensor("op_1361_cast")]; + tensor var_1362_perm_0 = const()[name = tensor("op_1362_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1369 = const()[name = tensor("op_1369"), val = tensor([1, 77, 16, 64])]; + tensor var_1370_cast = reshape(shape = var_1369, x = tensor_89_cast)[name = tensor("op_1370_cast")]; + tensor var_1371_perm_0 = const()[name = tensor("op_1371_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1373 = const()[name = tensor("op_1373"), val = tensor([16, -1, 64])]; + tensor transpose_44 = transpose(perm = var_1371_perm_0, x = var_1370_cast)[name = tensor("transpose_44")]; + tensor query_states_29_cast = reshape(shape = var_1373, x = transpose_44)[name = tensor("query_states_29_cast")]; + tensor var_1375 = const()[name = tensor("op_1375"), val = tensor([16, -1, 64])]; + tensor transpose_43 = transpose(perm = var_1355_perm_0, x = var_1354_cast)[name = tensor("transpose_43")]; + tensor key_states_59_cast = reshape(shape = var_1375, x = transpose_43)[name = tensor("key_states_59_cast")]; + tensor var_1377 = const()[name = tensor("op_1377"), val = tensor([16, -1, 64])]; + tensor transpose_42 = transpose(perm = var_1362_perm_0, x = var_1361_cast)[name = tensor("transpose_42")]; + tensor value_states_59_cast = reshape(shape = var_1377, x = transpose_42)[name = tensor("value_states_59_cast")]; + tensor var_1380_perm_0 = const()[name = tensor("op_1380_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_85_transpose_x_0 = const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), val = tensor(false)]; + tensor transpose_41 = transpose(perm = var_1380_perm_0, x = key_states_59_cast)[name = tensor("transpose_41")]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = query_states_29_cast, y = transpose_41)[name = tensor("attn_weights_85_cast")]; + tensor var_1382 = const()[name = tensor("op_1382"), val = tensor([1, 16, 77, 77])]; + tensor var_1383_cast = reshape(shape = var_1382, x = attn_weights_85_cast)[name = tensor("op_1383_cast")]; + tensor attn_weights_87_cast = add(x = var_1383_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_87_cast")]; + tensor var_1388 = const()[name = tensor("op_1388"), val = tensor([16, 77, 77])]; + tensor input_229_cast = reshape(shape = var_1388, x = attn_weights_87_cast)[name = tensor("input_229_cast")]; + tensor input_231_cast = softmax(axis = var_5, x = input_229_cast)[name = tensor("input_231_cast")]; + tensor 
attn_output_85_transpose_x_0 = const()[name = tensor("attn_output_85_transpose_x_0"), val = tensor(false)]; + tensor attn_output_85_transpose_y_0 = const()[name = tensor("attn_output_85_transpose_y_0"), val = tensor(false)]; + tensor attn_output_85_cast = matmul(transpose_x = attn_output_85_transpose_x_0, transpose_y = attn_output_85_transpose_y_0, x = input_231_cast, y = value_states_59_cast)[name = tensor("attn_output_85_cast")]; + tensor var_1393 = const()[name = tensor("op_1393"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_87_cast = reshape(shape = var_1393, x = attn_output_85_cast)[name = tensor("attn_output_87_cast")]; + tensor attn_output_89_perm_0 = const()[name = tensor("attn_output_89_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1396 = const()[name = tensor("op_1396"), val = tensor([1, 77, 1024])]; + tensor transpose_40 = transpose(perm = attn_output_89_perm_0, x = attn_output_87_cast)[name = tensor("transpose_40")]; + tensor input_233_cast = reshape(shape = var_1396, x = transpose_40)[name = tensor("input_233_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191208512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191732864))), name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191732992)))]; + tensor linear_87_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16_palettized, x = input_233_cast)[name = tensor("linear_87_cast")]; + tensor input_235_cast = add(x = input_227_cast, y = linear_87_cast)[name = tensor("input_235_cast")]; + tensor input_237_axes_0 = const()[name = tensor("input_237_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191735104)))]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191737216)))]; + tensor input_237_cast = layer_norm(axes = input_237_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16, x = input_235_cast)[name = tensor("input_237_cast")]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191739328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193836544))), name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 
1024])]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193836672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193838784))), name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_88_cast = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16_palettized, x = input_237_cast)[name = tensor("linear_88_cast")]; + tensor input_241_mode_0 = const()[name = tensor("input_241_mode_0"), val = tensor("EXACT")]; + tensor input_241_cast = gelu(mode = input_241_mode_0, x = linear_88_cast)[name = tensor("input_241_cast")]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193838912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195936128))), name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195936256)))]; + tensor linear_89_cast = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16_palettized, x = input_241_cast)[name = tensor("linear_89_cast")]; + tensor input_243_cast = add(x = input_235_cast, y = linear_89_cast)[name = tensor("input_243_cast")]; + tensor hidden_states_91_axes_0 = const()[name = tensor("hidden_states_91_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195938368)))]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195940480)))]; + tensor hidden_states_91_cast = layer_norm(axes = hidden_states_91_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16, x = input_243_cast)[name = tensor("hidden_states_91_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195942592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196466944))), name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196467072)))]; + tensor linear_90_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_91_cast)[name = tensor("linear_90_cast")]; + tensor var_1435_to_fp16 = const()[name = tensor("op_1435_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_95_cast = mul(x = linear_90_cast, y = var_1435_to_fp16)[name = tensor("tensor_95_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196469184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196993536))), name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196993664)))]; + tensor linear_91_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_91_cast)[name = tensor("linear_91_cast")]; + tensor var_1440 = const()[name = tensor("op_1440"), val = tensor([1, -1, 16, 64])]; + tensor var_1441_cast = reshape(shape = var_1440, x = linear_91_cast)[name = tensor("op_1441_cast")]; + tensor var_1442_perm_0 = const()[name = tensor("op_1442_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196995776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197520128))), name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197520256)))]; + tensor linear_92_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_91_cast)[name = tensor("linear_92_cast")]; + tensor var_1447 = const()[name = tensor("op_1447"), val = tensor([1, -1, 16, 64])]; + tensor var_1448_cast = reshape(shape = var_1447, x = linear_92_cast)[name = tensor("op_1448_cast")]; + tensor var_1449_perm_0 = const()[name = tensor("op_1449_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1456 = const()[name = tensor("op_1456"), val = tensor([1, 77, 16, 64])]; + tensor var_1457_cast = reshape(shape = var_1456, x = tensor_95_cast)[name = tensor("op_1457_cast")]; + tensor var_1458_perm_0 = const()[name = tensor("op_1458_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1460 
= const()[name = tensor("op_1460"), val = tensor([16, -1, 64])]; + tensor transpose_39 = transpose(perm = var_1458_perm_0, x = var_1457_cast)[name = tensor("transpose_39")]; + tensor query_states_31_cast = reshape(shape = var_1460, x = transpose_39)[name = tensor("query_states_31_cast")]; + tensor var_1462 = const()[name = tensor("op_1462"), val = tensor([16, -1, 64])]; + tensor transpose_38 = transpose(perm = var_1442_perm_0, x = var_1441_cast)[name = tensor("transpose_38")]; + tensor key_states_63_cast = reshape(shape = var_1462, x = transpose_38)[name = tensor("key_states_63_cast")]; + tensor var_1464 = const()[name = tensor("op_1464"), val = tensor([16, -1, 64])]; + tensor transpose_37 = transpose(perm = var_1449_perm_0, x = var_1448_cast)[name = tensor("transpose_37")]; + tensor value_states_63_cast = reshape(shape = var_1464, x = transpose_37)[name = tensor("value_states_63_cast")]; + tensor var_1467_perm_0 = const()[name = tensor("op_1467_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_91_transpose_x_0 = const()[name = tensor("attn_weights_91_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_91_transpose_y_0 = const()[name = tensor("attn_weights_91_transpose_y_0"), val = tensor(false)]; + tensor transpose_36 = transpose(perm = var_1467_perm_0, x = key_states_63_cast)[name = tensor("transpose_36")]; + tensor attn_weights_91_cast = matmul(transpose_x = attn_weights_91_transpose_x_0, transpose_y = attn_weights_91_transpose_y_0, x = query_states_31_cast, y = transpose_36)[name = tensor("attn_weights_91_cast")]; + tensor var_1469 = const()[name = tensor("op_1469"), val = tensor([1, 16, 77, 77])]; + tensor var_1470_cast = reshape(shape = var_1469, x = attn_weights_91_cast)[name = tensor("op_1470_cast")]; + tensor attn_weights_93_cast = add(x = var_1470_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_93_cast")]; + tensor var_1475 = const()[name = tensor("op_1475"), val = tensor([16, 77, 77])]; + tensor input_245_cast = reshape(shape = var_1475, x = attn_weights_93_cast)[name = tensor("input_245_cast")]; + tensor input_247_cast = softmax(axis = var_5, x = input_245_cast)[name = tensor("input_247_cast")]; + tensor attn_output_91_transpose_x_0 = const()[name = tensor("attn_output_91_transpose_x_0"), val = tensor(false)]; + tensor attn_output_91_transpose_y_0 = const()[name = tensor("attn_output_91_transpose_y_0"), val = tensor(false)]; + tensor attn_output_91_cast = matmul(transpose_x = attn_output_91_transpose_x_0, transpose_y = attn_output_91_transpose_y_0, x = input_247_cast, y = value_states_63_cast)[name = tensor("attn_output_91_cast")]; + tensor var_1480 = const()[name = tensor("op_1480"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_93_cast = reshape(shape = var_1480, x = attn_output_91_cast)[name = tensor("attn_output_93_cast")]; + tensor attn_output_95_perm_0 = const()[name = tensor("attn_output_95_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1483 = const()[name = tensor("op_1483"), val = tensor([1, 77, 1024])]; + tensor transpose_35 = transpose(perm = attn_output_95_perm_0, x = attn_output_93_cast)[name = tensor("transpose_35")]; + tensor input_249_cast = reshape(shape = var_1483, x = transpose_35)[name = tensor("input_249_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197522368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset 
= tensor(198046720))), name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198046848)))]; + tensor linear_93_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16_palettized, x = input_249_cast)[name = tensor("linear_93_cast")]; + tensor input_251_cast = add(x = input_243_cast, y = linear_93_cast)[name = tensor("input_251_cast")]; + tensor input_253_axes_0 = const()[name = tensor("input_253_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198048960)))]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198051072)))]; + tensor input_253_cast = layer_norm(axes = input_253_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16, x = input_251_cast)[name = tensor("input_253_cast")]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198053184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200150400))), name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200150528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200152640))), name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_94_cast = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16_palettized, x = input_253_cast)[name = tensor("linear_94_cast")]; + tensor input_257_mode_0 = const()[name = tensor("input_257_mode_0"), val = tensor("EXACT")]; + tensor input_257_cast = gelu(mode = input_257_mode_0, x = linear_94_cast)[name = tensor("input_257_cast")]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200152768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202249984))), name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor 
text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202250112)))]; + tensor linear_95_cast = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16_palettized, x = input_257_cast)[name = tensor("linear_95_cast")]; + tensor input_259_cast = add(x = input_251_cast, y = linear_95_cast)[name = tensor("input_259_cast")]; + tensor hidden_states_97_axes_0 = const()[name = tensor("hidden_states_97_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202252224)))]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202254336)))]; + tensor hidden_states_97_cast = layer_norm(axes = hidden_states_97_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16, x = input_259_cast)[name = tensor("hidden_states_97_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202256448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202780800))), name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202780928)))]; + tensor linear_96_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_97_cast)[name = tensor("linear_96_cast")]; + tensor var_1522_to_fp16 = const()[name = tensor("op_1522_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_101_cast = mul(x = linear_96_cast, y = var_1522_to_fp16)[name = tensor("tensor_101_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202783040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203307392))), name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203307520)))]; + tensor 
linear_97_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_97_cast)[name = tensor("linear_97_cast")]; + tensor var_1527 = const()[name = tensor("op_1527"), val = tensor([1, -1, 16, 64])]; + tensor var_1528_cast = reshape(shape = var_1527, x = linear_97_cast)[name = tensor("op_1528_cast")]; + tensor var_1529_perm_0 = const()[name = tensor("op_1529_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203309632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203833984))), name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203834112)))]; + tensor linear_98_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_97_cast)[name = tensor("linear_98_cast")]; + tensor var_1534 = const()[name = tensor("op_1534"), val = tensor([1, -1, 16, 64])]; + tensor var_1535_cast = reshape(shape = var_1534, x = linear_98_cast)[name = tensor("op_1535_cast")]; + tensor var_1536_perm_0 = const()[name = tensor("op_1536_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1543 = const()[name = tensor("op_1543"), val = tensor([1, 77, 16, 64])]; + tensor var_1544_cast = reshape(shape = var_1543, x = tensor_101_cast)[name = tensor("op_1544_cast")]; + tensor var_1545_perm_0 = const()[name = tensor("op_1545_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1547 = const()[name = tensor("op_1547"), val = tensor([16, -1, 64])]; + tensor transpose_34 = transpose(perm = var_1545_perm_0, x = var_1544_cast)[name = tensor("transpose_34")]; + tensor query_states_33_cast = reshape(shape = var_1547, x = transpose_34)[name = tensor("query_states_33_cast")]; + tensor var_1549 = const()[name = tensor("op_1549"), val = tensor([16, -1, 64])]; + tensor transpose_33 = transpose(perm = var_1529_perm_0, x = var_1528_cast)[name = tensor("transpose_33")]; + tensor key_states_67_cast = reshape(shape = var_1549, x = transpose_33)[name = tensor("key_states_67_cast")]; + tensor var_1551 = const()[name = tensor("op_1551"), val = tensor([16, -1, 64])]; + tensor transpose_32 = transpose(perm = var_1536_perm_0, x = var_1535_cast)[name = tensor("transpose_32")]; + tensor value_states_67_cast = reshape(shape = var_1551, x = transpose_32)[name = tensor("value_states_67_cast")]; + tensor var_1554_perm_0 = const()[name = tensor("op_1554_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; + tensor transpose_31 = transpose(perm = var_1554_perm_0, x = key_states_67_cast)[name = tensor("transpose_31")]; + tensor attn_weights_97_cast = matmul(transpose_x = 
attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = query_states_33_cast, y = transpose_31)[name = tensor("attn_weights_97_cast")]; + tensor var_1556 = const()[name = tensor("op_1556"), val = tensor([1, 16, 77, 77])]; + tensor var_1557_cast = reshape(shape = var_1556, x = attn_weights_97_cast)[name = tensor("op_1557_cast")]; + tensor attn_weights_99_cast = add(x = var_1557_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_99_cast")]; + tensor var_1562 = const()[name = tensor("op_1562"), val = tensor([16, 77, 77])]; + tensor input_261_cast = reshape(shape = var_1562, x = attn_weights_99_cast)[name = tensor("input_261_cast")]; + tensor input_263_cast = softmax(axis = var_5, x = input_261_cast)[name = tensor("input_263_cast")]; + tensor attn_output_97_transpose_x_0 = const()[name = tensor("attn_output_97_transpose_x_0"), val = tensor(false)]; + tensor attn_output_97_transpose_y_0 = const()[name = tensor("attn_output_97_transpose_y_0"), val = tensor(false)]; + tensor attn_output_97_cast = matmul(transpose_x = attn_output_97_transpose_x_0, transpose_y = attn_output_97_transpose_y_0, x = input_263_cast, y = value_states_67_cast)[name = tensor("attn_output_97_cast")]; + tensor var_1567 = const()[name = tensor("op_1567"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_99_cast = reshape(shape = var_1567, x = attn_output_97_cast)[name = tensor("attn_output_99_cast")]; + tensor attn_output_101_perm_0 = const()[name = tensor("attn_output_101_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1570 = const()[name = tensor("op_1570"), val = tensor([1, 77, 1024])]; + tensor transpose_30 = transpose(perm = attn_output_101_perm_0, x = attn_output_99_cast)[name = tensor("transpose_30")]; + tensor input_265_cast = reshape(shape = var_1570, x = transpose_30)[name = tensor("input_265_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203836224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204360576))), name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204360704)))]; + tensor linear_99_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16_palettized, x = input_265_cast)[name = tensor("linear_99_cast")]; + tensor input_267_cast = add(x = input_259_cast, y = linear_99_cast)[name = tensor("input_267_cast")]; + tensor input_269_axes_0 = const()[name = tensor("input_269_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204362816)))]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(204364928)))]; + tensor input_269_cast = layer_norm(axes = input_269_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16, x = input_267_cast)[name = tensor("input_269_cast")]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204367040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(206464256))), name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(206464384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(206466496))), name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_100_cast = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16_palettized, x = input_269_cast)[name = tensor("linear_100_cast")]; + tensor input_273_mode_0 = const()[name = tensor("input_273_mode_0"), val = tensor("EXACT")]; + tensor input_273_cast = gelu(mode = input_273_mode_0, x = linear_100_cast)[name = tensor("input_273_cast")]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(206466624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208563840))), name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208563968)))]; + tensor linear_101_cast = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16_palettized, x = input_273_cast)[name = tensor("linear_101_cast")]; + tensor input_275_cast = add(x = input_267_cast, y = linear_101_cast)[name = tensor("input_275_cast")]; + tensor hidden_states_103_axes_0 = const()[name = tensor("hidden_states_103_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208566080)))]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208568192)))]; + tensor hidden_states_103_cast = layer_norm(axes = hidden_states_103_axes_0, beta = 
text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16, x = input_275_cast)[name = tensor("hidden_states_103_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208570304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209094656))), name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209094784)))]; + tensor linear_102_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_103_cast)[name = tensor("linear_102_cast")]; + tensor var_1609_to_fp16 = const()[name = tensor("op_1609_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_107_cast = mul(x = linear_102_cast, y = var_1609_to_fp16)[name = tensor("tensor_107_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209096896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209621248))), name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209621376)))]; + tensor linear_103_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_103_cast)[name = tensor("linear_103_cast")]; + tensor var_1614 = const()[name = tensor("op_1614"), val = tensor([1, -1, 16, 64])]; + tensor var_1615_cast = reshape(shape = var_1614, x = linear_103_cast)[name = tensor("op_1615_cast")]; + tensor var_1616_perm_0 = const()[name = tensor("op_1616_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(209623488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210147840))), name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210147968)))]; + tensor linear_104_cast = linear(bias = 
text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_103_cast)[name = tensor("linear_104_cast")]; + tensor var_1621 = const()[name = tensor("op_1621"), val = tensor([1, -1, 16, 64])]; + tensor var_1622_cast = reshape(shape = var_1621, x = linear_104_cast)[name = tensor("op_1622_cast")]; + tensor var_1623_perm_0 = const()[name = tensor("op_1623_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1630 = const()[name = tensor("op_1630"), val = tensor([1, 77, 16, 64])]; + tensor var_1631_cast = reshape(shape = var_1630, x = tensor_107_cast)[name = tensor("op_1631_cast")]; + tensor var_1632_perm_0 = const()[name = tensor("op_1632_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1634 = const()[name = tensor("op_1634"), val = tensor([16, -1, 64])]; + tensor transpose_29 = transpose(perm = var_1632_perm_0, x = var_1631_cast)[name = tensor("transpose_29")]; + tensor query_states_35_cast = reshape(shape = var_1634, x = transpose_29)[name = tensor("query_states_35_cast")]; + tensor var_1636 = const()[name = tensor("op_1636"), val = tensor([16, -1, 64])]; + tensor transpose_28 = transpose(perm = var_1616_perm_0, x = var_1615_cast)[name = tensor("transpose_28")]; + tensor key_states_71_cast = reshape(shape = var_1636, x = transpose_28)[name = tensor("key_states_71_cast")]; + tensor var_1638 = const()[name = tensor("op_1638"), val = tensor([16, -1, 64])]; + tensor transpose_27 = transpose(perm = var_1623_perm_0, x = var_1622_cast)[name = tensor("transpose_27")]; + tensor value_states_71_cast = reshape(shape = var_1638, x = transpose_27)[name = tensor("value_states_71_cast")]; + tensor var_1641_perm_0 = const()[name = tensor("op_1641_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_103_transpose_x_0 = const()[name = tensor("attn_weights_103_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_103_transpose_y_0 = const()[name = tensor("attn_weights_103_transpose_y_0"), val = tensor(false)]; + tensor transpose_26 = transpose(perm = var_1641_perm_0, x = key_states_71_cast)[name = tensor("transpose_26")]; + tensor attn_weights_103_cast = matmul(transpose_x = attn_weights_103_transpose_x_0, transpose_y = attn_weights_103_transpose_y_0, x = query_states_35_cast, y = transpose_26)[name = tensor("attn_weights_103_cast")]; + tensor var_1643 = const()[name = tensor("op_1643"), val = tensor([1, 16, 77, 77])]; + tensor var_1644_cast = reshape(shape = var_1643, x = attn_weights_103_cast)[name = tensor("op_1644_cast")]; + tensor attn_weights_105_cast = add(x = var_1644_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_105_cast")]; + tensor var_1649 = const()[name = tensor("op_1649"), val = tensor([16, 77, 77])]; + tensor input_277_cast = reshape(shape = var_1649, x = attn_weights_105_cast)[name = tensor("input_277_cast")]; + tensor input_279_cast = softmax(axis = var_5, x = input_277_cast)[name = tensor("input_279_cast")]; + tensor attn_output_103_transpose_x_0 = const()[name = tensor("attn_output_103_transpose_x_0"), val = tensor(false)]; + tensor attn_output_103_transpose_y_0 = const()[name = tensor("attn_output_103_transpose_y_0"), val = tensor(false)]; + tensor attn_output_103_cast = matmul(transpose_x = attn_output_103_transpose_x_0, transpose_y = attn_output_103_transpose_y_0, x = input_279_cast, y = value_states_71_cast)[name = tensor("attn_output_103_cast")]; + tensor var_1654 = const()[name = tensor("op_1654"), val = tensor([1, 16, 77, 64])]; + 
tensor attn_output_105_cast = reshape(shape = var_1654, x = attn_output_103_cast)[name = tensor("attn_output_105_cast")]; + tensor attn_output_107_perm_0 = const()[name = tensor("attn_output_107_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1657 = const()[name = tensor("op_1657"), val = tensor([1, 77, 1024])]; + tensor transpose_25 = transpose(perm = attn_output_107_perm_0, x = attn_output_105_cast)[name = tensor("transpose_25")]; + tensor input_281_cast = reshape(shape = var_1657, x = transpose_25)[name = tensor("input_281_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210150080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210674432))), name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210674560)))]; + tensor linear_105_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16_palettized, x = input_281_cast)[name = tensor("linear_105_cast")]; + tensor input_283_cast = add(x = input_275_cast, y = linear_105_cast)[name = tensor("input_283_cast")]; + tensor input_285_axes_0 = const()[name = tensor("input_285_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210676672)))]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210678784)))]; + tensor input_285_cast = layer_norm(axes = input_285_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16, x = input_283_cast)[name = tensor("input_285_cast")]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210680896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212778112))), name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212778240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212780352))), name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_106_cast = linear(bias = 
text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16_palettized, x = input_285_cast)[name = tensor("linear_106_cast")]; + tensor input_289_mode_0 = const()[name = tensor("input_289_mode_0"), val = tensor("EXACT")]; + tensor input_289_cast = gelu(mode = input_289_mode_0, x = linear_106_cast)[name = tensor("input_289_cast")]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212780480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214877696))), name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214877824)))]; + tensor linear_107_cast = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16_palettized, x = input_289_cast)[name = tensor("linear_107_cast")]; + tensor input_291_cast = add(x = input_283_cast, y = linear_107_cast)[name = tensor("input_291_cast")]; + tensor hidden_states_109_axes_0 = const()[name = tensor("hidden_states_109_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214879936)))]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214882048)))]; + tensor hidden_states_109_cast = layer_norm(axes = hidden_states_109_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16, x = input_291_cast)[name = tensor("hidden_states_109_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(214884160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215408512))), name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215408640)))]; + tensor linear_108_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_109_cast)[name = tensor("linear_108_cast")]; + tensor var_1696_to_fp16 = const()[name = tensor("op_1696_to_fp16"), val = 
tensor(0x1p-3)]; + tensor tensor_113_cast = mul(x = linear_108_cast, y = var_1696_to_fp16)[name = tensor("tensor_113_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215410752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215935104))), name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215935232)))]; + tensor linear_109_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_109_cast)[name = tensor("linear_109_cast")]; + tensor var_1701 = const()[name = tensor("op_1701"), val = tensor([1, -1, 16, 64])]; + tensor var_1702_cast = reshape(shape = var_1701, x = linear_109_cast)[name = tensor("op_1702_cast")]; + tensor var_1703_perm_0 = const()[name = tensor("op_1703_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215937344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216461696))), name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216461824)))]; + tensor linear_110_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_109_cast)[name = tensor("linear_110_cast")]; + tensor var_1708 = const()[name = tensor("op_1708"), val = tensor([1, -1, 16, 64])]; + tensor var_1709_cast = reshape(shape = var_1708, x = linear_110_cast)[name = tensor("op_1709_cast")]; + tensor var_1710_perm_0 = const()[name = tensor("op_1710_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1717 = const()[name = tensor("op_1717"), val = tensor([1, 77, 16, 64])]; + tensor var_1718_cast = reshape(shape = var_1717, x = tensor_113_cast)[name = tensor("op_1718_cast")]; + tensor var_1719_perm_0 = const()[name = tensor("op_1719_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1721 = const()[name = tensor("op_1721"), val = tensor([16, -1, 64])]; + tensor transpose_24 = transpose(perm = var_1719_perm_0, x = var_1718_cast)[name = tensor("transpose_24")]; + tensor query_states_37_cast = reshape(shape = var_1721, x = transpose_24)[name = tensor("query_states_37_cast")]; + tensor var_1723 = const()[name = tensor("op_1723"), val = tensor([16, -1, 64])]; + tensor transpose_23 = transpose(perm = var_1703_perm_0, x = var_1702_cast)[name = tensor("transpose_23")]; + tensor key_states_75_cast = 
reshape(shape = var_1723, x = transpose_23)[name = tensor("key_states_75_cast")]; + tensor var_1725 = const()[name = tensor("op_1725"), val = tensor([16, -1, 64])]; + tensor transpose_22 = transpose(perm = var_1710_perm_0, x = var_1709_cast)[name = tensor("transpose_22")]; + tensor value_states_75_cast = reshape(shape = var_1725, x = transpose_22)[name = tensor("value_states_75_cast")]; + tensor var_1728_perm_0 = const()[name = tensor("op_1728_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; + tensor transpose_21 = transpose(perm = var_1728_perm_0, x = key_states_75_cast)[name = tensor("transpose_21")]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = query_states_37_cast, y = transpose_21)[name = tensor("attn_weights_109_cast")]; + tensor var_1730 = const()[name = tensor("op_1730"), val = tensor([1, 16, 77, 77])]; + tensor var_1731_cast = reshape(shape = var_1730, x = attn_weights_109_cast)[name = tensor("op_1731_cast")]; + tensor attn_weights_111_cast = add(x = var_1731_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_111_cast")]; + tensor var_1736 = const()[name = tensor("op_1736"), val = tensor([16, 77, 77])]; + tensor input_293_cast = reshape(shape = var_1736, x = attn_weights_111_cast)[name = tensor("input_293_cast")]; + tensor input_295_cast = softmax(axis = var_5, x = input_293_cast)[name = tensor("input_295_cast")]; + tensor attn_output_109_transpose_x_0 = const()[name = tensor("attn_output_109_transpose_x_0"), val = tensor(false)]; + tensor attn_output_109_transpose_y_0 = const()[name = tensor("attn_output_109_transpose_y_0"), val = tensor(false)]; + tensor attn_output_109_cast = matmul(transpose_x = attn_output_109_transpose_x_0, transpose_y = attn_output_109_transpose_y_0, x = input_295_cast, y = value_states_75_cast)[name = tensor("attn_output_109_cast")]; + tensor var_1741 = const()[name = tensor("op_1741"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_111_cast = reshape(shape = var_1741, x = attn_output_109_cast)[name = tensor("attn_output_111_cast")]; + tensor attn_output_113_perm_0 = const()[name = tensor("attn_output_113_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1744 = const()[name = tensor("op_1744"), val = tensor([1, 77, 1024])]; + tensor transpose_20 = transpose(perm = attn_output_113_perm_0, x = attn_output_111_cast)[name = tensor("transpose_20")]; + tensor input_297_cast = reshape(shape = var_1744, x = transpose_20)[name = tensor("input_297_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216463936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216988288))), name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216988416)))]; + tensor linear_111_cast = linear(bias 
= text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16_palettized, x = input_297_cast)[name = tensor("linear_111_cast")]; + tensor input_299_cast = add(x = input_291_cast, y = linear_111_cast)[name = tensor("input_299_cast")]; + tensor input_301_axes_0 = const()[name = tensor("input_301_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216990528)))]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216992640)))]; + tensor input_301_cast = layer_norm(axes = input_301_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16, x = input_299_cast)[name = tensor("input_301_cast")]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216994752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219091968))), name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219092096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219094208))), name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_112_cast = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16_palettized, x = input_301_cast)[name = tensor("linear_112_cast")]; + tensor input_305_mode_0 = const()[name = tensor("input_305_mode_0"), val = tensor("EXACT")]; + tensor input_305_cast = gelu(mode = input_305_mode_0, x = linear_112_cast)[name = tensor("input_305_cast")]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219094336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221191552))), name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221191680)))]; + tensor linear_113_cast = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16_palettized, x = input_305_cast)[name = 
tensor("linear_113_cast")]; + tensor input_307_cast = add(x = input_299_cast, y = linear_113_cast)[name = tensor("input_307_cast")]; + tensor hidden_states_115_axes_0 = const()[name = tensor("hidden_states_115_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221193792)))]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221195904)))]; + tensor hidden_states_115_cast = layer_norm(axes = hidden_states_115_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16, x = input_307_cast)[name = tensor("hidden_states_115_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221198016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221722368))), name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221722496)))]; + tensor linear_114_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_115_cast)[name = tensor("linear_114_cast")]; + tensor var_1783_to_fp16 = const()[name = tensor("op_1783_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_119_cast = mul(x = linear_114_cast, y = var_1783_to_fp16)[name = tensor("tensor_119_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221724608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222248960))), name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222249088)))]; + tensor linear_115_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_115_cast)[name = tensor("linear_115_cast")]; + tensor var_1788 = const()[name = tensor("op_1788"), val = tensor([1, -1, 16, 64])]; + tensor var_1789_cast = reshape(shape = var_1788, x = linear_115_cast)[name = tensor("op_1789_cast")]; + tensor 
var_1790_perm_0 = const()[name = tensor("op_1790_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222251200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222775552))), name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222775680)))]; + tensor linear_116_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_115_cast)[name = tensor("linear_116_cast")]; + tensor var_1795 = const()[name = tensor("op_1795"), val = tensor([1, -1, 16, 64])]; + tensor var_1796_cast = reshape(shape = var_1795, x = linear_116_cast)[name = tensor("op_1796_cast")]; + tensor var_1797_perm_0 = const()[name = tensor("op_1797_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1804 = const()[name = tensor("op_1804"), val = tensor([1, 77, 16, 64])]; + tensor var_1805_cast = reshape(shape = var_1804, x = tensor_119_cast)[name = tensor("op_1805_cast")]; + tensor var_1806_perm_0 = const()[name = tensor("op_1806_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1808 = const()[name = tensor("op_1808"), val = tensor([16, -1, 64])]; + tensor transpose_19 = transpose(perm = var_1806_perm_0, x = var_1805_cast)[name = tensor("transpose_19")]; + tensor query_states_39_cast = reshape(shape = var_1808, x = transpose_19)[name = tensor("query_states_39_cast")]; + tensor var_1810 = const()[name = tensor("op_1810"), val = tensor([16, -1, 64])]; + tensor transpose_18 = transpose(perm = var_1790_perm_0, x = var_1789_cast)[name = tensor("transpose_18")]; + tensor key_states_79_cast = reshape(shape = var_1810, x = transpose_18)[name = tensor("key_states_79_cast")]; + tensor var_1812 = const()[name = tensor("op_1812"), val = tensor([16, -1, 64])]; + tensor transpose_17 = transpose(perm = var_1797_perm_0, x = var_1796_cast)[name = tensor("transpose_17")]; + tensor value_states_79_cast = reshape(shape = var_1812, x = transpose_17)[name = tensor("value_states_79_cast")]; + tensor var_1815_perm_0 = const()[name = tensor("op_1815_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_115_transpose_x_0 = const()[name = tensor("attn_weights_115_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_115_transpose_y_0 = const()[name = tensor("attn_weights_115_transpose_y_0"), val = tensor(false)]; + tensor transpose_16 = transpose(perm = var_1815_perm_0, x = key_states_79_cast)[name = tensor("transpose_16")]; + tensor attn_weights_115_cast = matmul(transpose_x = attn_weights_115_transpose_x_0, transpose_y = attn_weights_115_transpose_y_0, x = query_states_39_cast, y = transpose_16)[name = tensor("attn_weights_115_cast")]; + tensor var_1817 = const()[name = tensor("op_1817"), val = tensor([1, 16, 77, 77])]; + tensor var_1818_cast = reshape(shape = var_1817, x = attn_weights_115_cast)[name = tensor("op_1818_cast")]; + tensor attn_weights_117_cast = add(x = var_1818_cast, y = op_57_to_fp16_palettized)[name = 
tensor("attn_weights_117_cast")]; + tensor var_1823 = const()[name = tensor("op_1823"), val = tensor([16, 77, 77])]; + tensor input_309_cast = reshape(shape = var_1823, x = attn_weights_117_cast)[name = tensor("input_309_cast")]; + tensor input_311_cast = softmax(axis = var_5, x = input_309_cast)[name = tensor("input_311_cast")]; + tensor attn_output_115_transpose_x_0 = const()[name = tensor("attn_output_115_transpose_x_0"), val = tensor(false)]; + tensor attn_output_115_transpose_y_0 = const()[name = tensor("attn_output_115_transpose_y_0"), val = tensor(false)]; + tensor attn_output_115_cast = matmul(transpose_x = attn_output_115_transpose_x_0, transpose_y = attn_output_115_transpose_y_0, x = input_311_cast, y = value_states_79_cast)[name = tensor("attn_output_115_cast")]; + tensor var_1828 = const()[name = tensor("op_1828"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_117_cast = reshape(shape = var_1828, x = attn_output_115_cast)[name = tensor("attn_output_117_cast")]; + tensor attn_output_119_perm_0 = const()[name = tensor("attn_output_119_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1831 = const()[name = tensor("op_1831"), val = tensor([1, 77, 1024])]; + tensor transpose_15 = transpose(perm = attn_output_119_perm_0, x = attn_output_117_cast)[name = tensor("transpose_15")]; + tensor input_313_cast = reshape(shape = var_1831, x = transpose_15)[name = tensor("input_313_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222777792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223302144))), name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223302272)))]; + tensor linear_117_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16_palettized, x = input_313_cast)[name = tensor("linear_117_cast")]; + tensor input_315_cast = add(x = input_307_cast, y = linear_117_cast)[name = tensor("input_315_cast")]; + tensor input_317_axes_0 = const()[name = tensor("input_317_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223304384)))]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223306496)))]; + tensor input_317_cast = layer_norm(axes = input_317_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16, x = input_315_cast)[name = tensor("input_317_cast")]; + tensor 
text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223308608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(225405824))), name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(225405952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(225408064))), name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_118_cast = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16_palettized, x = input_317_cast)[name = tensor("linear_118_cast")]; + tensor input_321_mode_0 = const()[name = tensor("input_321_mode_0"), val = tensor("EXACT")]; + tensor input_321_cast = gelu(mode = input_321_mode_0, x = linear_118_cast)[name = tensor("input_321_cast")]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(225408192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227505408))), name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227505536)))]; + tensor linear_119_cast = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16_palettized, x = input_321_cast)[name = tensor("linear_119_cast")]; + tensor input_323_cast = add(x = input_315_cast, y = linear_119_cast)[name = tensor("input_323_cast")]; + tensor hidden_states_121_axes_0 = const()[name = tensor("hidden_states_121_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227507648)))]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227509760)))]; + tensor hidden_states_121_cast = layer_norm(axes = hidden_states_121_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16, x = input_323_cast)[name = tensor("hidden_states_121_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(227511872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228036224))), name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228036352)))]; + tensor linear_120_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_121_cast)[name = tensor("linear_120_cast")]; + tensor var_1870_to_fp16 = const()[name = tensor("op_1870_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_125_cast = mul(x = linear_120_cast, y = var_1870_to_fp16)[name = tensor("tensor_125_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228038464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228562816))), name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228562944)))]; + tensor linear_121_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_121_cast)[name = tensor("linear_121_cast")]; + tensor var_1875 = const()[name = tensor("op_1875"), val = tensor([1, -1, 16, 64])]; + tensor var_1876_cast = reshape(shape = var_1875, x = linear_121_cast)[name = tensor("op_1876_cast")]; + tensor var_1877_perm_0 = const()[name = tensor("op_1877_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228565056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229089408))), name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229089536)))]; + tensor linear_122_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_121_cast)[name = tensor("linear_122_cast")]; + tensor var_1882 = const()[name = tensor("op_1882"), val = tensor([1, -1, 16, 64])]; + tensor var_1883_cast = reshape(shape = var_1882, x = linear_122_cast)[name = 
tensor("op_1883_cast")]; + tensor var_1884_perm_0 = const()[name = tensor("op_1884_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1891 = const()[name = tensor("op_1891"), val = tensor([1, 77, 16, 64])]; + tensor var_1892_cast = reshape(shape = var_1891, x = tensor_125_cast)[name = tensor("op_1892_cast")]; + tensor var_1893_perm_0 = const()[name = tensor("op_1893_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1895 = const()[name = tensor("op_1895"), val = tensor([16, -1, 64])]; + tensor transpose_14 = transpose(perm = var_1893_perm_0, x = var_1892_cast)[name = tensor("transpose_14")]; + tensor query_states_41_cast = reshape(shape = var_1895, x = transpose_14)[name = tensor("query_states_41_cast")]; + tensor var_1897 = const()[name = tensor("op_1897"), val = tensor([16, -1, 64])]; + tensor transpose_13 = transpose(perm = var_1877_perm_0, x = var_1876_cast)[name = tensor("transpose_13")]; + tensor key_states_83_cast = reshape(shape = var_1897, x = transpose_13)[name = tensor("key_states_83_cast")]; + tensor var_1899 = const()[name = tensor("op_1899"), val = tensor([16, -1, 64])]; + tensor transpose_12 = transpose(perm = var_1884_perm_0, x = var_1883_cast)[name = tensor("transpose_12")]; + tensor value_states_83_cast = reshape(shape = var_1899, x = transpose_12)[name = tensor("value_states_83_cast")]; + tensor var_1902_perm_0 = const()[name = tensor("op_1902_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; + tensor transpose_11 = transpose(perm = var_1902_perm_0, x = key_states_83_cast)[name = tensor("transpose_11")]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = query_states_41_cast, y = transpose_11)[name = tensor("attn_weights_121_cast")]; + tensor var_1904 = const()[name = tensor("op_1904"), val = tensor([1, 16, 77, 77])]; + tensor var_1905_cast = reshape(shape = var_1904, x = attn_weights_121_cast)[name = tensor("op_1905_cast")]; + tensor attn_weights_123_cast = add(x = var_1905_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_123_cast")]; + tensor var_1910 = const()[name = tensor("op_1910"), val = tensor([16, 77, 77])]; + tensor input_325_cast = reshape(shape = var_1910, x = attn_weights_123_cast)[name = tensor("input_325_cast")]; + tensor input_327_cast = softmax(axis = var_5, x = input_325_cast)[name = tensor("input_327_cast")]; + tensor attn_output_121_transpose_x_0 = const()[name = tensor("attn_output_121_transpose_x_0"), val = tensor(false)]; + tensor attn_output_121_transpose_y_0 = const()[name = tensor("attn_output_121_transpose_y_0"), val = tensor(false)]; + tensor attn_output_121_cast = matmul(transpose_x = attn_output_121_transpose_x_0, transpose_y = attn_output_121_transpose_y_0, x = input_327_cast, y = value_states_83_cast)[name = tensor("attn_output_121_cast")]; + tensor var_1915 = const()[name = tensor("op_1915"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_123_cast = reshape(shape = var_1915, x = attn_output_121_cast)[name = tensor("attn_output_123_cast")]; + tensor attn_output_125_perm_0 = const()[name = tensor("attn_output_125_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1918 = const()[name = tensor("op_1918"), val = tensor([1, 77, 1024])]; + tensor transpose_10 = transpose(perm = attn_output_125_perm_0, x = 
attn_output_123_cast)[name = tensor("transpose_10")]; + tensor input_329_cast = reshape(shape = var_1918, x = transpose_10)[name = tensor("input_329_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229091648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229616000))), name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229616128)))]; + tensor linear_123_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16_palettized, x = input_329_cast)[name = tensor("linear_123_cast")]; + tensor input_331_cast = add(x = input_323_cast, y = linear_123_cast)[name = tensor("input_331_cast")]; + tensor input_333_axes_0 = const()[name = tensor("input_333_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229618240)))]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229620352)))]; + tensor input_333_cast = layer_norm(axes = input_333_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16, x = input_331_cast)[name = tensor("input_333_cast")]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229622464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231719680))), name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231719808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231721920))), name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_124_cast = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16_palettized, x = input_333_cast)[name = tensor("linear_124_cast")]; + tensor input_337_mode_0 = const()[name = tensor("input_337_mode_0"), val = tensor("EXACT")]; + tensor input_337_cast = gelu(mode = input_337_mode_0, x = linear_124_cast)[name = 
tensor("input_337_cast")]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231722048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233819264))), name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233819392)))]; + tensor linear_125_cast = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16_palettized, x = input_337_cast)[name = tensor("linear_125_cast")]; + tensor input_339_cast = add(x = input_331_cast, y = linear_125_cast)[name = tensor("input_339_cast")]; + tensor hidden_states_127_axes_0 = const()[name = tensor("hidden_states_127_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233821504)))]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233823616)))]; + tensor hidden_states_127_cast = layer_norm(axes = hidden_states_127_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16, x = input_339_cast)[name = tensor("hidden_states_127_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233825728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234350080))), name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234350208)))]; + tensor linear_126_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_127_cast)[name = tensor("linear_126_cast")]; + tensor var_1957_to_fp16 = const()[name = tensor("op_1957_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_131_cast = mul(x = linear_126_cast, y = var_1957_to_fp16)[name = tensor("tensor_131_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234352320))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(234876672))), name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234876800)))]; + tensor linear_127_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_127_cast)[name = tensor("linear_127_cast")]; + tensor var_1962 = const()[name = tensor("op_1962"), val = tensor([1, -1, 16, 64])]; + tensor var_1963_cast = reshape(shape = var_1962, x = linear_127_cast)[name = tensor("op_1963_cast")]; + tensor var_1964_perm_0 = const()[name = tensor("op_1964_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234878912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235403264))), name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235403392)))]; + tensor linear_128_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_127_cast)[name = tensor("linear_128_cast")]; + tensor var_1969 = const()[name = tensor("op_1969"), val = tensor([1, -1, 16, 64])]; + tensor var_1970_cast = reshape(shape = var_1969, x = linear_128_cast)[name = tensor("op_1970_cast")]; + tensor var_1971_perm_0 = const()[name = tensor("op_1971_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1978 = const()[name = tensor("op_1978"), val = tensor([1, 77, 16, 64])]; + tensor var_1979_cast = reshape(shape = var_1978, x = tensor_131_cast)[name = tensor("op_1979_cast")]; + tensor var_1980_perm_0 = const()[name = tensor("op_1980_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1982 = const()[name = tensor("op_1982"), val = tensor([16, -1, 64])]; + tensor transpose_9 = transpose(perm = var_1980_perm_0, x = var_1979_cast)[name = tensor("transpose_9")]; + tensor query_states_43_cast = reshape(shape = var_1982, x = transpose_9)[name = tensor("query_states_43_cast")]; + tensor var_1984 = const()[name = tensor("op_1984"), val = tensor([16, -1, 64])]; + tensor transpose_8 = transpose(perm = var_1964_perm_0, x = var_1963_cast)[name = tensor("transpose_8")]; + tensor key_states_87_cast = reshape(shape = var_1984, x = transpose_8)[name = tensor("key_states_87_cast")]; + tensor var_1986 = const()[name = tensor("op_1986"), val = tensor([16, -1, 64])]; + tensor transpose_7 = transpose(perm = var_1971_perm_0, x = var_1970_cast)[name = tensor("transpose_7")]; + tensor value_states_87_cast = reshape(shape = var_1986, x = transpose_7)[name = tensor("value_states_87_cast")]; + 
tensor var_1989_perm_0 = const()[name = tensor("op_1989_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_127_transpose_x_0 = const()[name = tensor("attn_weights_127_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_127_transpose_y_0 = const()[name = tensor("attn_weights_127_transpose_y_0"), val = tensor(false)]; + tensor transpose_6 = transpose(perm = var_1989_perm_0, x = key_states_87_cast)[name = tensor("transpose_6")]; + tensor attn_weights_127_cast = matmul(transpose_x = attn_weights_127_transpose_x_0, transpose_y = attn_weights_127_transpose_y_0, x = query_states_43_cast, y = transpose_6)[name = tensor("attn_weights_127_cast")]; + tensor var_1991 = const()[name = tensor("op_1991"), val = tensor([1, 16, 77, 77])]; + tensor var_1992_cast = reshape(shape = var_1991, x = attn_weights_127_cast)[name = tensor("op_1992_cast")]; + tensor attn_weights_129_cast = add(x = var_1992_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_129_cast")]; + tensor var_1997 = const()[name = tensor("op_1997"), val = tensor([16, 77, 77])]; + tensor input_341_cast = reshape(shape = var_1997, x = attn_weights_129_cast)[name = tensor("input_341_cast")]; + tensor input_343_cast = softmax(axis = var_5, x = input_341_cast)[name = tensor("input_343_cast")]; + tensor attn_output_127_transpose_x_0 = const()[name = tensor("attn_output_127_transpose_x_0"), val = tensor(false)]; + tensor attn_output_127_transpose_y_0 = const()[name = tensor("attn_output_127_transpose_y_0"), val = tensor(false)]; + tensor attn_output_127_cast = matmul(transpose_x = attn_output_127_transpose_x_0, transpose_y = attn_output_127_transpose_y_0, x = input_343_cast, y = value_states_87_cast)[name = tensor("attn_output_127_cast")]; + tensor var_2002 = const()[name = tensor("op_2002"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_129_cast = reshape(shape = var_2002, x = attn_output_127_cast)[name = tensor("attn_output_129_cast")]; + tensor attn_output_131_perm_0 = const()[name = tensor("attn_output_131_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2005 = const()[name = tensor("op_2005"), val = tensor([1, 77, 1024])]; + tensor transpose_5 = transpose(perm = attn_output_131_perm_0, x = attn_output_129_cast)[name = tensor("transpose_5")]; + tensor input_345_cast = reshape(shape = var_2005, x = transpose_5)[name = tensor("input_345_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235405504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235929856))), name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235929984)))]; + tensor linear_129_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16_palettized, x = input_345_cast)[name = tensor("linear_129_cast")]; + tensor input_347_cast = add(x = input_339_cast, y = linear_129_cast)[name = tensor("input_347_cast")]; + tensor input_349_axes_0 = const()[name = 
tensor("input_349_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235932096)))]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235934208)))]; + tensor input_349_cast = layer_norm(axes = input_349_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16, x = input_347_cast)[name = tensor("input_349_cast")]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235936320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(238033536))), name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(238033664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(238035776))), name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_130_cast = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16_palettized, x = input_349_cast)[name = tensor("linear_130_cast")]; + tensor input_353_mode_0 = const()[name = tensor("input_353_mode_0"), val = tensor("EXACT")]; + tensor input_353_cast = gelu(mode = input_353_mode_0, x = linear_130_cast)[name = tensor("input_353_cast")]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(238035904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(240133120))), name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(240133248)))]; + tensor linear_131_cast = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16_palettized, x = input_353_cast)[name = tensor("linear_131_cast")]; + tensor input_355_cast = add(x = input_347_cast, y = linear_131_cast)[name = tensor("input_355_cast")]; + tensor hidden_states_133_axes_0 = const()[name = tensor("hidden_states_133_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(240135360)))]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(240137472)))]; + tensor hidden_states_133_cast = layer_norm(axes = hidden_states_133_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16, x = input_355_cast)[name = tensor("hidden_states_133_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(240139584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(240663936))), name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(240664064)))]; + tensor linear_132_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16_palettized, x = hidden_states_133_cast)[name = tensor("linear_132_cast")]; + tensor var_2044_to_fp16 = const()[name = tensor("op_2044_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_cast = mul(x = linear_132_cast, y = var_2044_to_fp16)[name = tensor("tensor_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(240666176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241190528))), name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241190656)))]; + tensor linear_133_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16_palettized, x = hidden_states_133_cast)[name = tensor("linear_133_cast")]; + tensor var_2049 = const()[name = tensor("op_2049"), val = tensor([1, -1, 16, 64])]; + tensor var_2050_cast = reshape(shape = var_2049, x = linear_133_cast)[name = tensor("op_2050_cast")]; + tensor var_2051_perm_0 = const()[name = tensor("op_2051_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241192768))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(241717120))), name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241717248)))]; + tensor linear_134_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16_palettized, x = hidden_states_133_cast)[name = tensor("linear_134_cast")]; + tensor var_2056 = const()[name = tensor("op_2056"), val = tensor([1, -1, 16, 64])]; + tensor var_2057_cast = reshape(shape = var_2056, x = linear_134_cast)[name = tensor("op_2057_cast")]; + tensor var_2058_perm_0 = const()[name = tensor("op_2058_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2065 = const()[name = tensor("op_2065"), val = tensor([1, 77, 16, 64])]; + tensor var_2066_cast = reshape(shape = var_2065, x = tensor_cast)[name = tensor("op_2066_cast")]; + tensor var_2067_perm_0 = const()[name = tensor("op_2067_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2069 = const()[name = tensor("op_2069"), val = tensor([16, -1, 64])]; + tensor transpose_4 = transpose(perm = var_2067_perm_0, x = var_2066_cast)[name = tensor("transpose_4")]; + tensor query_states_cast = reshape(shape = var_2069, x = transpose_4)[name = tensor("query_states_cast")]; + tensor var_2071 = const()[name = tensor("op_2071"), val = tensor([16, -1, 64])]; + tensor transpose_3 = transpose(perm = var_2051_perm_0, x = var_2050_cast)[name = tensor("transpose_3")]; + tensor key_states_cast = reshape(shape = var_2071, x = transpose_3)[name = tensor("key_states_cast")]; + tensor var_2073 = const()[name = tensor("op_2073"), val = tensor([16, -1, 64])]; + tensor transpose_2 = transpose(perm = var_2058_perm_0, x = var_2057_cast)[name = tensor("transpose_2")]; + tensor value_states_cast = reshape(shape = var_2073, x = transpose_2)[name = tensor("value_states_cast")]; + tensor var_2076_perm_0 = const()[name = tensor("op_2076_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; + tensor transpose_1 = transpose(perm = var_2076_perm_0, x = key_states_cast)[name = tensor("transpose_1")]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = query_states_cast, y = transpose_1)[name = tensor("attn_weights_133_cast")]; + tensor var_2078 = const()[name = tensor("op_2078"), val = tensor([1, 16, 77, 77])]; + tensor var_2079_cast = reshape(shape = var_2078, x = attn_weights_133_cast)[name = tensor("op_2079_cast")]; + tensor attn_weights_135_cast = add(x = var_2079_cast, y = op_57_to_fp16_palettized)[name = tensor("attn_weights_135_cast")]; + tensor var_2084 = const()[name = tensor("op_2084"), val = tensor([16, 77, 77])]; + tensor input_357_cast = reshape(shape = var_2084, x = attn_weights_135_cast)[name = tensor("input_357_cast")]; + tensor input_359_cast = softmax(axis = var_5, x = input_357_cast)[name = tensor("input_359_cast")]; + tensor attn_output_133_transpose_x_0 = 
const()[name = tensor("attn_output_133_transpose_x_0"), val = tensor(false)]; + tensor attn_output_133_transpose_y_0 = const()[name = tensor("attn_output_133_transpose_y_0"), val = tensor(false)]; + tensor attn_output_133_cast = matmul(transpose_x = attn_output_133_transpose_x_0, transpose_y = attn_output_133_transpose_y_0, x = input_359_cast, y = value_states_cast)[name = tensor("attn_output_133_cast")]; + tensor var_2089 = const()[name = tensor("op_2089"), val = tensor([1, 16, 77, 64])]; + tensor attn_output_135_cast = reshape(shape = var_2089, x = attn_output_133_cast)[name = tensor("attn_output_135_cast")]; + tensor attn_output_perm_0 = const()[name = tensor("attn_output_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2092 = const()[name = tensor("op_2092"), val = tensor([1, 77, 1024])]; + tensor transpose_0 = transpose(perm = attn_output_perm_0, x = attn_output_135_cast)[name = tensor("transpose_0")]; + tensor input_361_cast = reshape(shape = var_2092, x = transpose_0)[name = tensor("input_361_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241719360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242243712))), name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16_palettized"), shape = tensor([1024, 1024])]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242243840)))]; + tensor linear_135_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16_palettized, x = input_361_cast)[name = tensor("linear_135_cast")]; + tensor input_363_cast = add(x = input_355_cast, y = linear_135_cast)[name = tensor("input_363_cast")]; + tensor input_365_axes_0 = const()[name = tensor("input_365_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242245952)))]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242248064)))]; + tensor input_365_cast = layer_norm(axes = input_365_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16, x = input_363_cast)[name = tensor("input_365_cast")]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242250176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244347392))), name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16_palettized"), shape = tensor([4096, 1024])]; + tensor 
text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244347520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244349632))), name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16_palettized"), shape = tensor([4096])]; + tensor linear_136_cast = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16_palettized, weight = text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16_palettized, x = input_365_cast)[name = tensor("linear_136_cast")]; + tensor input_369_mode_0 = const()[name = tensor("input_369_mode_0"), val = tensor("EXACT")]; + tensor input_369_cast = gelu(mode = input_369_mode_0, x = linear_136_cast)[name = tensor("input_369_cast")]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244349760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246446976))), name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16_palettized"), shape = tensor([1024, 4096])]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246447104)))]; + tensor linear_137_cast = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16_palettized, x = input_369_cast)[name = tensor("linear_137_cast")]; + tensor input_cast = add(x = input_363_cast, y = linear_137_cast)[name = tensor("input_cast")]; + tensor last_hidden_state_axes_0 = const()[name = tensor("last_hidden_state_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_final_layer_norm_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246449216)))]; + tensor text_encoder_text_model_final_layer_norm_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246451328)))]; + tensor last_hidden_state_cast = layer_norm(axes = last_hidden_state_axes_0, beta = text_encoder_text_model_final_layer_norm_bias_to_fp16, epsilon = var_15_to_fp16, gamma = text_encoder_text_model_final_layer_norm_weight_to_fp16, x = input_cast)[name = tensor("last_hidden_state_cast")]; + tensor last_hidden_state_cast_to_fp32_dtype_0 = const()[name = tensor("last_hidden_state_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor var_2117 = const()[name = tensor("op_2117"), val = tensor([0])]; + tensor var_2119 = reduce_argmax(axis = var_5, keep_dims = var_6, x = cast_2)[name = tensor("op_2119")]; + tensor stack_0_axis_0 = const()[name = tensor("stack_0_axis_0"), val = tensor(1)]; + tensor stack_0 = stack(axis = stack_0_axis_0, values = (var_2117, var_2119))[name = tensor("stack_0")]; + tensor var_2121_transpose_batch_dims_0 = const()[name = tensor("op_2121_transpose_batch_dims_0"), val = tensor(0)]; + tensor var_2121_transpose_cast = gather_nd(batch_dims = var_2121_transpose_batch_dims_0, 
indices = stack_0, x = last_hidden_state_cast)[name = tensor("op_2121_transpose_cast")]; + tensor var_2121_cast_to_fp32_dtype_0 = const()[name = tensor("op_2121_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor last_hidden_state = cast(dtype = last_hidden_state_cast_to_fp32_dtype_0, x = last_hidden_state_cast)[name = tensor("cast_0")]; + tensor pooled_outputs = cast(dtype = var_2121_cast_to_fp32_dtype_0, x = var_2121_transpose_cast)[name = tensor("cast_1")]; + } -> (last_hidden_state, pooled_outputs); +} \ No newline at end of file
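
Reading aid (not part of the patch): every encoder layer in the graph above repeats the same pattern — layer_norm, q/k/v linear projections with the query scaled by 0x1p-3 = 0.125 = 1/sqrt(64), 16-head / 64-dim attention with an additive mask (op_57), out_proj plus a residual add, then layer_norm, fc1, EXACT GeLU, fc2 and a second residual add — and the epilogue applies a final layer_norm and gathers the hidden state at the argmax token id to produce pooled_outputs. Below is a minimal NumPy sketch of that computation, assuming float32 throughout (the graph itself runs in fp16, with constexpr_lut_to_dense expanding LUT-palettized weights to dense tensors and a final cast of both outputs to fp32); the helper names and the weights-dict layout are illustrative assumptions, not anything defined by the model file.

```python
import math
import numpy as np

SEQ, HIDDEN, HEADS, HEAD_DIM = 77, 1024, 16, 64
EPS = 1.0e-5                     # layer_norm epsilon (0x1.5p-17 in the graph)
_erf = np.vectorize(math.erf)    # elementwise erf for the EXACT GeLU


def layer_norm(x, gamma, beta):
    mu = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return (x - mu) / np.sqrt(var + EPS) * gamma + beta


def linear(x, w, b):
    # MIL linear(weight = w, x = x): w is [out_features, in_features]
    return x @ w.T + b


def softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)


def gelu_exact(x):
    # gelu(mode = "EXACT"): 0.5 * x * (1 + erf(x / sqrt(2)))
    return 0.5 * x * (1.0 + _erf(x / math.sqrt(2.0)))


def split_heads(t):
    # [1, 77, 1024] -> [1, 77, 16, 64] -> [1, 16, 77, 64] -> [16, 77, 64]
    return (t.reshape(1, SEQ, HEADS, HEAD_DIM)
             .transpose(0, 2, 1, 3)
             .reshape(HEADS, SEQ, HEAD_DIM))


def encoder_layer(x, p, mask):
    # p: dict with one layer's weights (hypothetical layout);
    # mask: additive attention mask broadcastable to [1, 16, 77, 77].
    h = layer_norm(x, p["ln1_w"], p["ln1_b"])
    q = split_heads(linear(h, p["q_w"], p["q_b"]) * 0.125)  # 0x1p-3 query scale
    k = split_heads(linear(h, p["k_w"], p["k_b"]))
    v = split_heads(linear(h, p["v_w"], p["v_b"]))
    scores = q @ k.transpose(0, 2, 1)                       # [16, 77, 77]
    scores = scores.reshape(1, HEADS, SEQ, SEQ) + mask      # add mask constant
    attn = softmax(scores.reshape(HEADS, SEQ, SEQ))
    ctx = attn @ v                                          # [16, 77, 64]
    ctx = (ctx.reshape(1, HEADS, SEQ, HEAD_DIM)
              .transpose(0, 2, 1, 3)
              .reshape(1, SEQ, HIDDEN))
    x = x + linear(ctx, p["out_w"], p["out_b"])             # residual 1
    h = layer_norm(x, p["ln2_w"], p["ln2_b"])
    h = gelu_exact(linear(h, p["fc1_w"], p["fc1_b"]))       # 1024 -> 4096
    return x + linear(h, p["fc2_w"], p["fc2_b"])            # residual 2


def encode(x, input_ids, layers, final_ln_w, final_ln_b, mask):
    # x: [1, 77, 1024] embedded inputs; input_ids: [1, 77] token ids;
    # layers: per-layer weight dicts in graph order.
    for p in layers:
        x = encoder_layer(x, p, mask)
    last_hidden_state = layer_norm(x, final_ln_w, final_ln_b)
    # reduce_argmax + gather_nd at the end of the graph:
    # take the hidden state at the position of the largest token id.
    pooled_outputs = last_hidden_state[0, int(np.argmax(input_ids[0]))]
    return last_hidden_state, pooled_outputs
```

The argmax-based gather is the usual CLIP-style pooling: the end-of-text token has the largest id in the vocabulary, so `argmax(input_ids)` locates it and `pooled_outputs` is the final-layer hidden state at that position.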