lithium0003 committed on
Commit ca32d55 · 1 Parent(s): 41fc0e9

initial commit

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. base/decoder_first.mlmodelc/analytics/coremldata.bin +3 -0
  2. base/decoder_first.mlmodelc/coremldata.bin +3 -0
  3. base/decoder_first.mlmodelc/metadata.json +106 -0
  4. base/decoder_first.mlmodelc/model.mil +369 -0
  5. base/decoder_first.mlmodelc/weights/weight.bin +3 -0
  6. base/decoder_second.mlmodelc/analytics/coremldata.bin +3 -0
  7. base/decoder_second.mlmodelc/coremldata.bin +3 -0
  8. base/decoder_second.mlmodelc/metadata.json +127 -0
  9. base/decoder_second.mlmodelc/model.mil +0 -0
  10. base/decoder_second.mlmodelc/weights/weight.bin +3 -0
  11. base/encoder.mlmodelc/analytics/coremldata.bin +3 -0
  12. base/encoder.mlmodelc/coremldata.bin +3 -0
  13. base/encoder.mlmodelc/metadata.json +69 -0
  14. base/encoder.mlmodelc/model.mil +384 -0
  15. base/encoder.mlmodelc/weights/weight.bin +3 -0
  16. base/model_dims.json +12 -0
  17. compile_model.sh +34 -0
  18. index/base +16 -0
  19. index/large-v2 +22 -0
  20. index/large-v3 +22 -0
  21. index/medium +16 -0
  22. index/small +16 -0
  23. index/tiny +16 -0
  24. large-v2/decoder_first.mlmodelc/analytics/coremldata.bin +3 -0
  25. large-v2/decoder_first.mlmodelc/coremldata.bin +3 -0
  26. large-v2/decoder_first.mlmodelc/metadata.json +106 -0
  27. large-v2/decoder_first.mlmodelc/model.mil +0 -0
  28. large-v2/decoder_first.mlmodelc/weights/weight.bin +3 -0
  29. large-v2/decoder_second.mlmodelc/analytics/coremldata.bin +3 -0
  30. large-v2/decoder_second.mlmodelc/coremldata.bin +3 -0
  31. large-v2/decoder_second.mlmodelc/metadata.json +127 -0
  32. large-v2/decoder_second.mlmodelc/model.mil +0 -0
  33. large-v2/decoder_second.mlmodelc/weights/weight.bin +3 -0
  34. large-v2/encoder.mlmodelc/analytics/coremldata.bin +3 -0
  35. large-v2/encoder.mlmodelc/coremldata.bin +3 -0
  36. large-v2/encoder.mlmodelc/metadata.json +76 -0
  37. large-v2/encoder.mlmodelc/model0/analytics/coremldata.bin +3 -0
  38. large-v2/encoder.mlmodelc/model0/coremldata.bin +3 -0
  39. large-v2/encoder.mlmodelc/model0/model.mil +0 -0
  40. large-v2/encoder.mlmodelc/model0/weights/0-weight.bin +3 -0
  41. large-v2/encoder.mlmodelc/model1/analytics/coremldata.bin +3 -0
  42. large-v2/encoder.mlmodelc/model1/coremldata.bin +3 -0
  43. large-v2/encoder.mlmodelc/model1/model.mil +0 -0
  44. large-v2/encoder.mlmodelc/model1/weights/1-weight.bin +3 -0
  45. large-v2/model_dims.json +12 -0
  46. large-v3/decoder_first.mlmodelc/analytics/coremldata.bin +3 -0
  47. large-v3/decoder_first.mlmodelc/coremldata.bin +3 -0
  48. large-v3/decoder_first.mlmodelc/metadata.json +106 -0
  49. large-v3/decoder_first.mlmodelc/model.mil +0 -0
  50. large-v3/decoder_first.mlmodelc/weights/weight.bin +3 -0
base/decoder_first.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3071377562292da4d34bf9d0ddcfe168fd10c3b81d4689d25c207179d2d58578
+ size 243
base/decoder_first.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fbe1879a296cf22a0441826a3028ae2ec63bfc8e9ff019132681d2a93610324
+ size 453
base/decoder_first.mlmodelc/metadata.json ADDED
@@ -0,0 +1,106 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16)",
+ "shortDescription" : "",
+ "shape" : "[]",
+ "name" : "dummy",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 9,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios18.writeState" : 14,
+ "Shape" : 12,
+ "Ios18.linear" : 12,
+ "Identity" : 1,
+ "Ios18.gather" : 12,
+ "Ios18.concat" : 12,
+ "Ios18.sliceUpdate" : 14,
+ "Ios18.cast" : 24,
+ "Ios18.expandDims" : 12,
+ "Ios18.readState" : 14
+ },
+ "computePrecision" : "Mixed (Float16, Int16, Int32, UInt16)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 6 × 1 × 448 × 512)",
+ "shortDescription" : "",
+ "shape" : "[6, 1, 448, 512]",
+ "name" : "k_cache1",
+ "type" : "State"
+ },
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 6 × 1 × 448 × 512)",
+ "shortDescription" : "",
+ "shape" : "[6, 1, 448, 512]",
+ "name" : "v_cache1",
+ "type" : "State"
+ },
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 6 × 1 × 1500 × 512)",
+ "shortDescription" : "",
+ "shape" : "[6, 1, 1500, 512]",
+ "name" : "k_cache2",
+ "type" : "State"
+ },
+ {
+ "dataType" : "Float16",
+ "isOptional" : "0",
+ "formattedType" : "State (Float16 6 × 1 × 1500 × 512)",
+ "shortDescription" : "",
+ "shape" : "[6, 1, 1500, 512]",
+ "name" : "v_cache2",
+ "type" : "State"
+ }
+ ],
+ "availability" : {
+ "macOS" : "15.0",
+ "tvOS" : "18.0",
+ "visionOS" : "2.0",
+ "watchOS" : "11.0",
+ "iOS" : "18.0",
+ "macCatalyst" : "18.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "dataType" : "Float16",
+ "hasShapeFlexibility" : "1",
+ "isOptional" : "0",
+ "shapeFlexibility" : "1 × 1...1500 × 512",
+ "shapeRange" : "[[1, 1], [1, 1500], [512, 512]]",
+ "formattedType" : "MultiArray (Float16 1 × 1 × 512)",
+ "type" : "MultiArray",
+ "shape" : "[1, 1, 512]",
+ "name" : "audio_data",
+ "shortDescription" : ""
+ }
+ ],
+ "generatedClassName" : "decoder_first",
+ "method" : "predict"
+ }
+ ]
base/decoder_first.mlmodelc/model.mil ADDED
@@ -0,0 +1,369 @@
+ program(1.3)
+ [buildInfo = dict<string, string>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+ {
+ func main<ios18>(tensor<fp16, [1, ?, 512]> audio_data, state<tensor<fp16, [6, 1, 448, 512]>> k_cache1, state<tensor<fp16, [6, 1, 1500, 512]>> k_cache2, state<tensor<fp16, [6, 1, 448, 512]>> v_cache1, state<tensor<fp16, [6, 1, 1500, 512]>> v_cache2) [FlexibleShapeInformation = tuple<tuple<string, dict<string, tensor<int32, [?]>>>, tuple<string, dict<string, list<tensor<int32, [2]>, ?>>>>((("DefaultShapes", {{"audio_data", [1, 1, 512]}}), ("RangeDims", {{"audio_data", [[1, 1], [1, 1500], [512, 512]]}})))] {
+ tensor<fp16, [1, ?, 512]> dummy = identity(x = audio_data)[name = string("identity_0")];
+ tensor<fp16, [6, 1, 448, 512]> read_state_0 = read_state(input = k_cache1)[name = string("read_state_0")];
+ tensor<int32, [4]> concat_0 = const()[name = string("concat_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [4]> concat_1 = const()[name = string("concat_1"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [4]> k_cache1_internal_tensor_assign_1_stride_0 = const()[name = string("k_cache1_internal_tensor_assign_1_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> k_cache1_internal_tensor_assign_1_begin_mask_0 = const()[name = string("k_cache1_internal_tensor_assign_1_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> k_cache1_internal_tensor_assign_1_end_mask_0 = const()[name = string("k_cache1_internal_tensor_assign_1_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
+ tensor<bool, [4]> k_cache1_internal_tensor_assign_1_squeeze_mask_0 = const()[name = string("k_cache1_internal_tensor_assign_1_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<fp16, [6, 1, 448, 512]> const_0_to_fp16 = const()[name = string("const_0_to_fp16"), val = tensor<fp16, [6, 1, 448, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
+ tensor<fp16, [6, 1, 448, 512]> k_cache1_internal_tensor_assign_1_cast_fp16 = slice_update(begin = concat_0, begin_mask = k_cache1_internal_tensor_assign_1_begin_mask_0, end = concat_1, end_mask = k_cache1_internal_tensor_assign_1_end_mask_0, squeeze_mask = k_cache1_internal_tensor_assign_1_squeeze_mask_0, stride = k_cache1_internal_tensor_assign_1_stride_0, update = const_0_to_fp16, x = read_state_0)[name = string("k_cache1_internal_tensor_assign_1_cast_fp16")];
+ write_state(data = k_cache1_internal_tensor_assign_1_cast_fp16, input = k_cache1)[name = string("coreml_update_state_14_write_state")];
+ tensor<fp16, [6, 1, 448, 512]> read_state_1 = read_state(input = v_cache1)[name = string("read_state_1")];
+ tensor<int32, [4]> concat_2 = const()[name = string("concat_2"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [4]> concat_3 = const()[name = string("concat_3"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [4]> v_cache1_internal_tensor_assign_1_stride_0 = const()[name = string("v_cache1_internal_tensor_assign_1_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> v_cache1_internal_tensor_assign_1_begin_mask_0 = const()[name = string("v_cache1_internal_tensor_assign_1_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> v_cache1_internal_tensor_assign_1_end_mask_0 = const()[name = string("v_cache1_internal_tensor_assign_1_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
+ tensor<bool, [4]> v_cache1_internal_tensor_assign_1_squeeze_mask_0 = const()[name = string("v_cache1_internal_tensor_assign_1_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<fp16, [6, 1, 448, 512]> v_cache1_internal_tensor_assign_1_cast_fp16 = slice_update(begin = concat_2, begin_mask = v_cache1_internal_tensor_assign_1_begin_mask_0, end = concat_3, end_mask = v_cache1_internal_tensor_assign_1_end_mask_0, squeeze_mask = v_cache1_internal_tensor_assign_1_squeeze_mask_0, stride = v_cache1_internal_tensor_assign_1_stride_0, update = const_0_to_fp16, x = read_state_1)[name = string("v_cache1_internal_tensor_assign_1_cast_fp16")];
+ write_state(data = v_cache1_internal_tensor_assign_1_cast_fp16, input = v_cache1)[name = string("coreml_update_state_15_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> read_state_2 = read_state(input = k_cache2)[name = string("read_state_2")];
+ tensor<fp16, [6, 1, 1500, 512]> read_state_3 = read_state(input = v_cache2)[name = string("read_state_3")];
+ tensor<fp16, [512, 512]> var_79_to_fp16 = const()[name = string("op_79_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(2752640)))];
+ tensor<fp16, [512]> linear_0_bias_0_to_fp16 = const()[name = string("linear_0_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3276992)))];
+ tensor<fp16, [1, ?, 512]> linear_0_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_79_to_fp16, x = audio_data)[name = string("linear_0_cast_fp16")];
+ tensor<fp16, [512, 512]> var_83_to_fp16 = const()[name = string("op_83_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3278080)))];
+ tensor<fp16, [512]> var_84_to_fp16 = const()[name = string("op_84_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3802432)))];
+ tensor<fp16, [1, ?, 512]> linear_1_cast_fp16 = linear(bias = var_84_to_fp16, weight = var_83_to_fp16, x = audio_data)[name = string("linear_1_cast_fp16")];
+ tensor<int32, [3]> var_86_shape_cast_fp16 = shape(x = linear_0_cast_fp16)[name = string("op_86_shape_cast_fp16")];
+ int32 gather_0_axis_0 = const()[name = string("gather_0_axis_0"), val = int32(0)];
+ int32 gather_0_batch_dims_0 = const()[name = string("gather_0_batch_dims_0"), val = int32(0)];
+ bool gather_0_validate_indices_0 = const()[name = string("gather_0_validate_indices_0"), val = bool(false)];
+ string var_86_shape_cast_fp16_to_int16_dtype_0 = const()[name = string("op_86_shape_cast_fp16_to_int16_dtype_0"), val = string("int16")];
+ uint16 select_0_to_uint16 = const()[name = string("select_0_to_uint16"), val = uint16(1)];
+ tensor<int16, [3]> var_86_shape_cast_fp16_to_int16 = cast(dtype = var_86_shape_cast_fp16_to_int16_dtype_0, x = var_86_shape_cast_fp16)[name = string("cast_43")];
+ int16 gather_0_cast_uint16 = gather(axis = gather_0_axis_0, batch_dims = gather_0_batch_dims_0, indices = select_0_to_uint16, validate_indices = gather_0_validate_indices_0, x = var_86_shape_cast_fp16_to_int16)[name = string("gather_0_cast_uint16")];
+ string gather_0_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_0_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_11_axes_0 = const()[name = string("expand_dims_11_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_0_cast_uint16_to_int32 = cast(dtype = gather_0_cast_uint16_to_int32_dtype_0, x = gather_0_cast_uint16)[name = string("cast_42")];
+ tensor<int32, [1]> expand_dims_11 = expand_dims(axes = expand_dims_11_axes_0, x = gather_0_cast_uint16_to_int32)[name = string("expand_dims_11")];
+ tensor<int32, [4]> concat_5 = const()[name = string("concat_5"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [1]> concat_6_values0_0 = const()[name = string("concat_6_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_6_values1_0 = const()[name = string("concat_6_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_6_values3_0 = const()[name = string("concat_6_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_6_axis_0 = const()[name = string("concat_6_axis_0"), val = int32(0)];
+ bool concat_6_interleave_0 = const()[name = string("concat_6_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_6 = concat(axis = concat_6_axis_0, interleave = concat_6_interleave_0, values = (concat_6_values0_0, concat_6_values1_0, expand_dims_11, concat_6_values3_0))[name = string("concat_6")];
+ tensor<int32, [4]> k_cache2_internal_tensor_assign_1_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_1_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_1_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_1_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_1_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_1_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_1_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_1_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> k_cache2_internal_tensor_assign_1_cast_fp16 = slice_update(begin = concat_5, begin_mask = k_cache2_internal_tensor_assign_1_begin_mask_0, end = concat_6, end_mask = k_cache2_internal_tensor_assign_1_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_1_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_1_stride_0, update = linear_0_cast_fp16, x = read_state_2)[name = string("k_cache2_internal_tensor_assign_1_cast_fp16")];
+ write_state(data = k_cache2_internal_tensor_assign_1_cast_fp16, input = k_cache2)[name = string("coreml_update_state_16_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_16 = read_state(input = k_cache2)[name = string("coreml_update_state_16")];
+ tensor<int32, [3]> var_91_shape_cast_fp16 = shape(x = linear_1_cast_fp16)[name = string("op_91_shape_cast_fp16")];
+ int32 gather_1_axis_0 = const()[name = string("gather_1_axis_0"), val = int32(0)];
+ int32 gather_1_batch_dims_0 = const()[name = string("gather_1_batch_dims_0"), val = int32(0)];
+ bool gather_1_validate_indices_0 = const()[name = string("gather_1_validate_indices_0"), val = bool(false)];
+ string var_91_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_91_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_1_to_uint16 = const()[name = string("select_1_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_91_shape_cast_fp16_to_uint16 = cast(dtype = var_91_shape_cast_fp16_to_uint16_dtype_0, x = var_91_shape_cast_fp16)[name = string("cast_41")];
+ uint16 gather_1_cast_uint16 = gather(axis = gather_1_axis_0, batch_dims = gather_1_batch_dims_0, indices = select_1_to_uint16, validate_indices = gather_1_validate_indices_0, x = var_91_shape_cast_fp16_to_uint16)[name = string("gather_1_cast_uint16")];
+ string gather_1_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_1_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_15_axes_0 = const()[name = string("expand_dims_15_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_1_cast_uint16_to_int32 = cast(dtype = gather_1_cast_uint16_to_int32_dtype_0, x = gather_1_cast_uint16)[name = string("cast_40")];
+ tensor<int32, [1]> expand_dims_15 = expand_dims(axes = expand_dims_15_axes_0, x = gather_1_cast_uint16_to_int32)[name = string("expand_dims_15")];
+ tensor<int32, [4]> concat_8 = const()[name = string("concat_8"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [1]> concat_9_values0_0 = const()[name = string("concat_9_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_9_values1_0 = const()[name = string("concat_9_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_9_values3_0 = const()[name = string("concat_9_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_9_axis_0 = const()[name = string("concat_9_axis_0"), val = int32(0)];
+ bool concat_9_interleave_0 = const()[name = string("concat_9_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_9 = concat(axis = concat_9_axis_0, interleave = concat_9_interleave_0, values = (concat_9_values0_0, concat_9_values1_0, expand_dims_15, concat_9_values3_0))[name = string("concat_9")];
+ tensor<int32, [4]> v_cache2_internal_tensor_assign_1_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_1_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_1_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_1_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_1_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_1_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_1_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_1_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> v_cache2_internal_tensor_assign_1_cast_fp16 = slice_update(begin = concat_8, begin_mask = v_cache2_internal_tensor_assign_1_begin_mask_0, end = concat_9, end_mask = v_cache2_internal_tensor_assign_1_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_1_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_1_stride_0, update = linear_1_cast_fp16, x = read_state_3)[name = string("v_cache2_internal_tensor_assign_1_cast_fp16")];
+ write_state(data = v_cache2_internal_tensor_assign_1_cast_fp16, input = v_cache2)[name = string("coreml_update_state_17_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_17 = read_state(input = v_cache2)[name = string("coreml_update_state_17")];
+ tensor<fp16, [512, 512]> var_113_to_fp16 = const()[name = string("op_113_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3803520)))];
+ tensor<fp16, [1, ?, 512]> linear_2_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_113_to_fp16, x = audio_data)[name = string("linear_2_cast_fp16")];
+ tensor<fp16, [512, 512]> var_117_to_fp16 = const()[name = string("op_117_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(4327872)))];
+ tensor<fp16, [512]> var_118_to_fp16 = const()[name = string("op_118_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(4852224)))];
+ tensor<fp16, [1, ?, 512]> linear_3_cast_fp16 = linear(bias = var_118_to_fp16, weight = var_117_to_fp16, x = audio_data)[name = string("linear_3_cast_fp16")];
+ tensor<int32, [3]> var_120_shape_cast_fp16 = shape(x = linear_2_cast_fp16)[name = string("op_120_shape_cast_fp16")];
+ int32 gather_2_axis_0 = const()[name = string("gather_2_axis_0"), val = int32(0)];
+ int32 gather_2_batch_dims_0 = const()[name = string("gather_2_batch_dims_0"), val = int32(0)];
+ bool gather_2_validate_indices_0 = const()[name = string("gather_2_validate_indices_0"), val = bool(false)];
+ string var_120_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_120_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_2_to_uint16 = const()[name = string("select_2_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_120_shape_cast_fp16_to_uint16 = cast(dtype = var_120_shape_cast_fp16_to_uint16_dtype_0, x = var_120_shape_cast_fp16)[name = string("cast_39")];
+ uint16 gather_2_cast_uint16 = gather(axis = gather_2_axis_0, batch_dims = gather_2_batch_dims_0, indices = select_2_to_uint16, validate_indices = gather_2_validate_indices_0, x = var_120_shape_cast_fp16_to_uint16)[name = string("gather_2_cast_uint16")];
+ string gather_2_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_2_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_19_axes_0 = const()[name = string("expand_dims_19_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_2_cast_uint16_to_int32 = cast(dtype = gather_2_cast_uint16_to_int32_dtype_0, x = gather_2_cast_uint16)[name = string("cast_38")];
+ tensor<int32, [1]> expand_dims_19 = expand_dims(axes = expand_dims_19_axes_0, x = gather_2_cast_uint16_to_int32)[name = string("expand_dims_19")];
+ tensor<int32, [4]> concat_11 = const()[name = string("concat_11"), val = tensor<int32, [4]>([1, 0, 0, 0])];
+ tensor<int32, [1]> concat_12_values0_0 = const()[name = string("concat_12_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_12_values1_0 = const()[name = string("concat_12_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_12_values3_0 = const()[name = string("concat_12_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_12_axis_0 = const()[name = string("concat_12_axis_0"), val = int32(0)];
+ bool concat_12_interleave_0 = const()[name = string("concat_12_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_12 = concat(axis = concat_12_axis_0, interleave = concat_12_interleave_0, values = (concat_12_values0_0, concat_12_values1_0, expand_dims_19, concat_12_values3_0))[name = string("concat_12")];
+ tensor<int32, [4]> k_cache2_internal_tensor_assign_2_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_2_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_2_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_2_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_2_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_2_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_2_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_2_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> k_cache2_internal_tensor_assign_2_cast_fp16 = slice_update(begin = concat_11, begin_mask = k_cache2_internal_tensor_assign_2_begin_mask_0, end = concat_12, end_mask = k_cache2_internal_tensor_assign_2_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_2_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_2_stride_0, update = linear_2_cast_fp16, x = coreml_update_state_16)[name = string("k_cache2_internal_tensor_assign_2_cast_fp16")];
+ write_state(data = k_cache2_internal_tensor_assign_2_cast_fp16, input = k_cache2)[name = string("coreml_update_state_18_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_18 = read_state(input = k_cache2)[name = string("coreml_update_state_18")];
+ tensor<int32, [3]> var_125_shape_cast_fp16 = shape(x = linear_3_cast_fp16)[name = string("op_125_shape_cast_fp16")];
+ int32 gather_3_axis_0 = const()[name = string("gather_3_axis_0"), val = int32(0)];
+ int32 gather_3_batch_dims_0 = const()[name = string("gather_3_batch_dims_0"), val = int32(0)];
+ bool gather_3_validate_indices_0 = const()[name = string("gather_3_validate_indices_0"), val = bool(false)];
+ string var_125_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_125_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_3_to_uint16 = const()[name = string("select_3_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_125_shape_cast_fp16_to_uint16 = cast(dtype = var_125_shape_cast_fp16_to_uint16_dtype_0, x = var_125_shape_cast_fp16)[name = string("cast_37")];
+ uint16 gather_3_cast_uint16 = gather(axis = gather_3_axis_0, batch_dims = gather_3_batch_dims_0, indices = select_3_to_uint16, validate_indices = gather_3_validate_indices_0, x = var_125_shape_cast_fp16_to_uint16)[name = string("gather_3_cast_uint16")];
+ string gather_3_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_3_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_23_axes_0 = const()[name = string("expand_dims_23_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_3_cast_uint16_to_int32 = cast(dtype = gather_3_cast_uint16_to_int32_dtype_0, x = gather_3_cast_uint16)[name = string("cast_36")];
+ tensor<int32, [1]> expand_dims_23 = expand_dims(axes = expand_dims_23_axes_0, x = gather_3_cast_uint16_to_int32)[name = string("expand_dims_23")];
+ tensor<int32, [4]> concat_14 = const()[name = string("concat_14"), val = tensor<int32, [4]>([1, 0, 0, 0])];
+ tensor<int32, [1]> concat_15_values0_0 = const()[name = string("concat_15_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_15_values1_0 = const()[name = string("concat_15_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_15_values3_0 = const()[name = string("concat_15_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_15_axis_0 = const()[name = string("concat_15_axis_0"), val = int32(0)];
+ bool concat_15_interleave_0 = const()[name = string("concat_15_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_15 = concat(axis = concat_15_axis_0, interleave = concat_15_interleave_0, values = (concat_15_values0_0, concat_15_values1_0, expand_dims_23, concat_15_values3_0))[name = string("concat_15")];
+ tensor<int32, [4]> v_cache2_internal_tensor_assign_2_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_2_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_2_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_2_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_2_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_2_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_2_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_2_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> v_cache2_internal_tensor_assign_2_cast_fp16 = slice_update(begin = concat_14, begin_mask = v_cache2_internal_tensor_assign_2_begin_mask_0, end = concat_15, end_mask = v_cache2_internal_tensor_assign_2_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_2_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_2_stride_0, update = linear_3_cast_fp16, x = coreml_update_state_17)[name = string("v_cache2_internal_tensor_assign_2_cast_fp16")];
+ write_state(data = v_cache2_internal_tensor_assign_2_cast_fp16, input = v_cache2)[name = string("coreml_update_state_19_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_19 = read_state(input = v_cache2)[name = string("coreml_update_state_19")];
+ tensor<fp16, [512, 512]> var_147_to_fp16 = const()[name = string("op_147_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(4853312)))];
+ tensor<fp16, [1, ?, 512]> linear_4_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_147_to_fp16, x = audio_data)[name = string("linear_4_cast_fp16")];
+ tensor<fp16, [512, 512]> var_151_to_fp16 = const()[name = string("op_151_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(5377664)))];
+ tensor<fp16, [512]> var_152_to_fp16 = const()[name = string("op_152_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(5902016)))];
+ tensor<fp16, [1, ?, 512]> linear_5_cast_fp16 = linear(bias = var_152_to_fp16, weight = var_151_to_fp16, x = audio_data)[name = string("linear_5_cast_fp16")];
+ tensor<int32, [3]> var_154_shape_cast_fp16 = shape(x = linear_4_cast_fp16)[name = string("op_154_shape_cast_fp16")];
+ int32 gather_4_axis_0 = const()[name = string("gather_4_axis_0"), val = int32(0)];
+ int32 gather_4_batch_dims_0 = const()[name = string("gather_4_batch_dims_0"), val = int32(0)];
+ bool gather_4_validate_indices_0 = const()[name = string("gather_4_validate_indices_0"), val = bool(false)];
+ string var_154_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_154_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_4_to_uint16 = const()[name = string("select_4_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_154_shape_cast_fp16_to_uint16 = cast(dtype = var_154_shape_cast_fp16_to_uint16_dtype_0, x = var_154_shape_cast_fp16)[name = string("cast_35")];
+ uint16 gather_4_cast_uint16 = gather(axis = gather_4_axis_0, batch_dims = gather_4_batch_dims_0, indices = select_4_to_uint16, validate_indices = gather_4_validate_indices_0, x = var_154_shape_cast_fp16_to_uint16)[name = string("gather_4_cast_uint16")];
+ string gather_4_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_4_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_27_axes_0 = const()[name = string("expand_dims_27_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_4_cast_uint16_to_int32 = cast(dtype = gather_4_cast_uint16_to_int32_dtype_0, x = gather_4_cast_uint16)[name = string("cast_34")];
+ tensor<int32, [1]> expand_dims_27 = expand_dims(axes = expand_dims_27_axes_0, x = gather_4_cast_uint16_to_int32)[name = string("expand_dims_27")];
+ tensor<int32, [4]> concat_17 = const()[name = string("concat_17"), val = tensor<int32, [4]>([2, 0, 0, 0])];
+ tensor<int32, [1]> concat_18_values0_0 = const()[name = string("concat_18_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_18_values1_0 = const()[name = string("concat_18_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_18_values3_0 = const()[name = string("concat_18_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_18_axis_0 = const()[name = string("concat_18_axis_0"), val = int32(0)];
+ bool concat_18_interleave_0 = const()[name = string("concat_18_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_18 = concat(axis = concat_18_axis_0, interleave = concat_18_interleave_0, values = (concat_18_values0_0, concat_18_values1_0, expand_dims_27, concat_18_values3_0))[name = string("concat_18")];
+ tensor<int32, [4]> k_cache2_internal_tensor_assign_3_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_3_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_3_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_3_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_3_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_3_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_3_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_3_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> k_cache2_internal_tensor_assign_3_cast_fp16 = slice_update(begin = concat_17, begin_mask = k_cache2_internal_tensor_assign_3_begin_mask_0, end = concat_18, end_mask = k_cache2_internal_tensor_assign_3_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_3_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_3_stride_0, update = linear_4_cast_fp16, x = coreml_update_state_18)[name = string("k_cache2_internal_tensor_assign_3_cast_fp16")];
+ write_state(data = k_cache2_internal_tensor_assign_3_cast_fp16, input = k_cache2)[name = string("coreml_update_state_20_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_20 = read_state(input = k_cache2)[name = string("coreml_update_state_20")];
+ tensor<int32, [3]> var_159_shape_cast_fp16 = shape(x = linear_5_cast_fp16)[name = string("op_159_shape_cast_fp16")];
+ int32 gather_5_axis_0 = const()[name = string("gather_5_axis_0"), val = int32(0)];
+ int32 gather_5_batch_dims_0 = const()[name = string("gather_5_batch_dims_0"), val = int32(0)];
+ bool gather_5_validate_indices_0 = const()[name = string("gather_5_validate_indices_0"), val = bool(false)];
+ string var_159_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_159_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_5_to_uint16 = const()[name = string("select_5_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_159_shape_cast_fp16_to_uint16 = cast(dtype = var_159_shape_cast_fp16_to_uint16_dtype_0, x = var_159_shape_cast_fp16)[name = string("cast_33")];
+ uint16 gather_5_cast_uint16 = gather(axis = gather_5_axis_0, batch_dims = gather_5_batch_dims_0, indices = select_5_to_uint16, validate_indices = gather_5_validate_indices_0, x = var_159_shape_cast_fp16_to_uint16)[name = string("gather_5_cast_uint16")];
+ string gather_5_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_5_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_31_axes_0 = const()[name = string("expand_dims_31_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_5_cast_uint16_to_int32 = cast(dtype = gather_5_cast_uint16_to_int32_dtype_0, x = gather_5_cast_uint16)[name = string("cast_32")];
+ tensor<int32, [1]> expand_dims_31 = expand_dims(axes = expand_dims_31_axes_0, x = gather_5_cast_uint16_to_int32)[name = string("expand_dims_31")];
+ tensor<int32, [4]> concat_20 = const()[name = string("concat_20"), val = tensor<int32, [4]>([2, 0, 0, 0])];
+ tensor<int32, [1]> concat_21_values0_0 = const()[name = string("concat_21_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_21_values1_0 = const()[name = string("concat_21_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_21_values3_0 = const()[name = string("concat_21_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_21_axis_0 = const()[name = string("concat_21_axis_0"), val = int32(0)];
+ bool concat_21_interleave_0 = const()[name = string("concat_21_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_21 = concat(axis = concat_21_axis_0, interleave = concat_21_interleave_0, values = (concat_21_values0_0, concat_21_values1_0, expand_dims_31, concat_21_values3_0))[name = string("concat_21")];
+ tensor<int32, [4]> v_cache2_internal_tensor_assign_3_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_3_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_3_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_3_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_3_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_3_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_3_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_3_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> v_cache2_internal_tensor_assign_3_cast_fp16 = slice_update(begin = concat_20, begin_mask = v_cache2_internal_tensor_assign_3_begin_mask_0, end = concat_21, end_mask = v_cache2_internal_tensor_assign_3_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_3_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_3_stride_0, update = linear_5_cast_fp16, x = coreml_update_state_19)[name = string("v_cache2_internal_tensor_assign_3_cast_fp16")];
+ write_state(data = v_cache2_internal_tensor_assign_3_cast_fp16, input = v_cache2)[name = string("coreml_update_state_21_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_21 = read_state(input = v_cache2)[name = string("coreml_update_state_21")];
+ tensor<fp16, [512, 512]> var_181_to_fp16 = const()[name = string("op_181_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(5903104)))];
+ tensor<fp16, [1, ?, 512]> linear_6_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_181_to_fp16, x = audio_data)[name = string("linear_6_cast_fp16")];
+ tensor<fp16, [512, 512]> var_185_to_fp16 = const()[name = string("op_185_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(6427456)))];
+ tensor<fp16, [512]> var_186_to_fp16 = const()[name = string("op_186_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(6951808)))];
+ tensor<fp16, [1, ?, 512]> linear_7_cast_fp16 = linear(bias = var_186_to_fp16, weight = var_185_to_fp16, x = audio_data)[name = string("linear_7_cast_fp16")];
+ tensor<int32, [3]> var_188_shape_cast_fp16 = shape(x = linear_6_cast_fp16)[name = string("op_188_shape_cast_fp16")];
+ int32 gather_6_axis_0 = const()[name = string("gather_6_axis_0"), val = int32(0)];
+ int32 gather_6_batch_dims_0 = const()[name = string("gather_6_batch_dims_0"), val = int32(0)];
+ bool gather_6_validate_indices_0 = const()[name = string("gather_6_validate_indices_0"), val = bool(false)];
+ string var_188_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_188_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_6_to_uint16 = const()[name = string("select_6_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_188_shape_cast_fp16_to_uint16 = cast(dtype = var_188_shape_cast_fp16_to_uint16_dtype_0, x = var_188_shape_cast_fp16)[name = string("cast_31")];
+ uint16 gather_6_cast_uint16 = gather(axis = gather_6_axis_0, batch_dims = gather_6_batch_dims_0, indices = select_6_to_uint16, validate_indices = gather_6_validate_indices_0, x = var_188_shape_cast_fp16_to_uint16)[name = string("gather_6_cast_uint16")];
+ string gather_6_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_6_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_35_axes_0 = const()[name = string("expand_dims_35_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_6_cast_uint16_to_int32 = cast(dtype = gather_6_cast_uint16_to_int32_dtype_0, x = gather_6_cast_uint16)[name = string("cast_30")];
+ tensor<int32, [1]> expand_dims_35 = expand_dims(axes = expand_dims_35_axes_0, x = gather_6_cast_uint16_to_int32)[name = string("expand_dims_35")];
+ tensor<int32, [4]> concat_23 = const()[name = string("concat_23"), val = tensor<int32, [4]>([3, 0, 0, 0])];
+ tensor<int32, [1]> concat_24_values0_0 = const()[name = string("concat_24_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_24_values1_0 = const()[name = string("concat_24_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_24_values3_0 = const()[name = string("concat_24_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_24_axis_0 = const()[name = string("concat_24_axis_0"), val = int32(0)];
+ bool concat_24_interleave_0 = const()[name = string("concat_24_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_24 = concat(axis = concat_24_axis_0, interleave = concat_24_interleave_0, values = (concat_24_values0_0, concat_24_values1_0, expand_dims_35, concat_24_values3_0))[name = string("concat_24")];
+ tensor<int32, [4]> k_cache2_internal_tensor_assign_4_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_4_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_4_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_4_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_4_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_4_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_4_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_4_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> k_cache2_internal_tensor_assign_4_cast_fp16 = slice_update(begin = concat_23, begin_mask = k_cache2_internal_tensor_assign_4_begin_mask_0, end = concat_24, end_mask = k_cache2_internal_tensor_assign_4_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_4_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_4_stride_0, update = linear_6_cast_fp16, x = coreml_update_state_20)[name = string("k_cache2_internal_tensor_assign_4_cast_fp16")];
+ write_state(data = k_cache2_internal_tensor_assign_4_cast_fp16, input = k_cache2)[name = string("coreml_update_state_22_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_22 = read_state(input = k_cache2)[name = string("coreml_update_state_22")];
+ tensor<int32, [3]> var_193_shape_cast_fp16 = shape(x = linear_7_cast_fp16)[name = string("op_193_shape_cast_fp16")];
+ int32 gather_7_axis_0 = const()[name = string("gather_7_axis_0"), val = int32(0)];
+ int32 gather_7_batch_dims_0 = const()[name = string("gather_7_batch_dims_0"), val = int32(0)];
+ bool gather_7_validate_indices_0 = const()[name = string("gather_7_validate_indices_0"), val = bool(false)];
+ string var_193_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_193_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_7_to_uint16 = const()[name = string("select_7_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_193_shape_cast_fp16_to_uint16 = cast(dtype = var_193_shape_cast_fp16_to_uint16_dtype_0, x = var_193_shape_cast_fp16)[name = string("cast_29")];
+ uint16 gather_7_cast_uint16 = gather(axis = gather_7_axis_0, batch_dims = gather_7_batch_dims_0, indices = select_7_to_uint16, validate_indices = gather_7_validate_indices_0, x = var_193_shape_cast_fp16_to_uint16)[name = string("gather_7_cast_uint16")];
+ string gather_7_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_7_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_39_axes_0 = const()[name = string("expand_dims_39_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_7_cast_uint16_to_int32 = cast(dtype = gather_7_cast_uint16_to_int32_dtype_0, x = gather_7_cast_uint16)[name = string("cast_28")];
+ tensor<int32, [1]> expand_dims_39 = expand_dims(axes = expand_dims_39_axes_0, x = gather_7_cast_uint16_to_int32)[name = string("expand_dims_39")];
+ tensor<int32, [4]> concat_26 = const()[name = string("concat_26"), val = tensor<int32, [4]>([3, 0, 0, 0])];
+ tensor<int32, [1]> concat_27_values0_0 = const()[name = string("concat_27_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_27_values1_0 = const()[name = string("concat_27_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_27_values3_0 = const()[name = string("concat_27_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_27_axis_0 = const()[name = string("concat_27_axis_0"), val = int32(0)];
+ bool concat_27_interleave_0 = const()[name = string("concat_27_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_27 = concat(axis = concat_27_axis_0, interleave = concat_27_interleave_0, values = (concat_27_values0_0, concat_27_values1_0, expand_dims_39, concat_27_values3_0))[name = string("concat_27")];
+ tensor<int32, [4]> v_cache2_internal_tensor_assign_4_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_4_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_4_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_4_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_4_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_4_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_4_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_4_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> v_cache2_internal_tensor_assign_4_cast_fp16 = slice_update(begin = concat_26, begin_mask = v_cache2_internal_tensor_assign_4_begin_mask_0, end = concat_27, end_mask = v_cache2_internal_tensor_assign_4_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_4_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_4_stride_0, update = linear_7_cast_fp16, x = coreml_update_state_21)[name = string("v_cache2_internal_tensor_assign_4_cast_fp16")];
+ write_state(data = v_cache2_internal_tensor_assign_4_cast_fp16, input = v_cache2)[name = string("coreml_update_state_23_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_23 = read_state(input = v_cache2)[name = string("coreml_update_state_23")];
+ tensor<fp16, [512, 512]> var_215_to_fp16 = const()[name = string("op_215_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(6952896)))];
+ tensor<fp16, [1, ?, 512]> linear_8_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_215_to_fp16, x = audio_data)[name = string("linear_8_cast_fp16")];
+ tensor<fp16, [512, 512]> var_219_to_fp16 = const()[name = string("op_219_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(7477248)))];
+ tensor<fp16, [512]> var_220_to_fp16 = const()[name = string("op_220_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(8001600)))];
+ tensor<fp16, [1, ?, 512]> linear_9_cast_fp16 = linear(bias = var_220_to_fp16, weight = var_219_to_fp16, x = audio_data)[name = string("linear_9_cast_fp16")];
+ tensor<int32, [3]> var_222_shape_cast_fp16 = shape(x = linear_8_cast_fp16)[name = string("op_222_shape_cast_fp16")];
+ int32 gather_8_axis_0 = const()[name = string("gather_8_axis_0"), val = int32(0)];
+ int32 gather_8_batch_dims_0 = const()[name = string("gather_8_batch_dims_0"), val = int32(0)];
+ bool gather_8_validate_indices_0 = const()[name = string("gather_8_validate_indices_0"), val = bool(false)];
+ string var_222_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_222_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_8_to_uint16 = const()[name = string("select_8_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_222_shape_cast_fp16_to_uint16 = cast(dtype = var_222_shape_cast_fp16_to_uint16_dtype_0, x = var_222_shape_cast_fp16)[name = string("cast_27")];
+ uint16 gather_8_cast_uint16 = gather(axis = gather_8_axis_0, batch_dims = gather_8_batch_dims_0, indices = select_8_to_uint16, validate_indices = gather_8_validate_indices_0, x = var_222_shape_cast_fp16_to_uint16)[name = string("gather_8_cast_uint16")];
+ string gather_8_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_8_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_43_axes_0 = const()[name = string("expand_dims_43_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_8_cast_uint16_to_int32 = cast(dtype = gather_8_cast_uint16_to_int32_dtype_0, x = gather_8_cast_uint16)[name = string("cast_26")];
+ tensor<int32, [1]> expand_dims_43 = expand_dims(axes = expand_dims_43_axes_0, x = gather_8_cast_uint16_to_int32)[name = string("expand_dims_43")];
+ tensor<int32, [4]> concat_29 = const()[name = string("concat_29"), val = tensor<int32, [4]>([4, 0, 0, 0])];
+ tensor<int32, [1]> concat_30_values0_0 = const()[name = string("concat_30_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_30_values1_0 = const()[name = string("concat_30_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_30_values3_0 = const()[name = string("concat_30_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_30_axis_0 = const()[name = string("concat_30_axis_0"), val = int32(0)];
+ bool concat_30_interleave_0 = const()[name = string("concat_30_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_30 = concat(axis = concat_30_axis_0, interleave = concat_30_interleave_0, values = (concat_30_values0_0, concat_30_values1_0, expand_dims_43, concat_30_values3_0))[name = string("concat_30")];
+ tensor<int32, [4]> k_cache2_internal_tensor_assign_5_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_5_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_5_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_5_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_5_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_5_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_5_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_5_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
+ tensor<fp16, [6, 1, 1500, 512]> k_cache2_internal_tensor_assign_5_cast_fp16 = slice_update(begin = concat_29, begin_mask = k_cache2_internal_tensor_assign_5_begin_mask_0, end = concat_30, end_mask = k_cache2_internal_tensor_assign_5_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_5_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_5_stride_0, update = linear_8_cast_fp16, x = coreml_update_state_22)[name = string("k_cache2_internal_tensor_assign_5_cast_fp16")];
+ write_state(data = k_cache2_internal_tensor_assign_5_cast_fp16, input = k_cache2)[name = string("coreml_update_state_24_write_state")];
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_24 = read_state(input = k_cache2)[name = string("coreml_update_state_24")];
+ tensor<int32, [3]> var_227_shape_cast_fp16 = shape(x = linear_9_cast_fp16)[name = string("op_227_shape_cast_fp16")];
+ int32 gather_9_axis_0 = const()[name = string("gather_9_axis_0"), val = int32(0)];
+ int32 gather_9_batch_dims_0 = const()[name = string("gather_9_batch_dims_0"), val = int32(0)];
+ bool gather_9_validate_indices_0 = const()[name = string("gather_9_validate_indices_0"), val = bool(false)];
+ string var_227_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_227_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
+ uint16 select_9_to_uint16 = const()[name = string("select_9_to_uint16"), val = uint16(1)];
+ tensor<uint16, [3]> var_227_shape_cast_fp16_to_uint16 = cast(dtype = var_227_shape_cast_fp16_to_uint16_dtype_0, x = var_227_shape_cast_fp16)[name = string("cast_25")];
+ uint16 gather_9_cast_uint16 = gather(axis = gather_9_axis_0, batch_dims = gather_9_batch_dims_0, indices = select_9_to_uint16, validate_indices = gather_9_validate_indices_0, x = var_227_shape_cast_fp16_to_uint16)[name = string("gather_9_cast_uint16")];
+ string gather_9_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_9_cast_uint16_to_int32_dtype_0"), val = string("int32")];
+ tensor<int32, [1]> expand_dims_47_axes_0 = const()[name = string("expand_dims_47_axes_0"), val = tensor<int32, [1]>([0])];
+ int32 gather_9_cast_uint16_to_int32 = cast(dtype = gather_9_cast_uint16_to_int32_dtype_0, x = gather_9_cast_uint16)[name = string("cast_24")];
+ tensor<int32, [1]> expand_dims_47 = expand_dims(axes = expand_dims_47_axes_0, x = gather_9_cast_uint16_to_int32)[name = string("expand_dims_47")];
+ tensor<int32, [4]> concat_32 = const()[name = string("concat_32"), val = tensor<int32, [4]>([4, 0, 0, 0])];
+ tensor<int32, [1]> concat_33_values0_0 = const()[name = string("concat_33_values0_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_33_values1_0 = const()[name = string("concat_33_values1_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> concat_33_values3_0 = const()[name = string("concat_33_values3_0"), val = tensor<int32, [1]>([0])];
+ int32 concat_33_axis_0 = const()[name = string("concat_33_axis_0"), val = int32(0)];
+ bool concat_33_interleave_0 = const()[name = string("concat_33_interleave_0"), val = bool(false)];
+ tensor<int32, [4]> concat_33 = concat(axis = concat_33_axis_0, interleave = concat_33_interleave_0, values = (concat_33_values0_0, concat_33_values1_0, expand_dims_47, concat_33_values3_0))[name = string("concat_33")];
+ tensor<int32, [4]> v_cache2_internal_tensor_assign_5_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_5_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_5_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_5_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
308
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_5_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_5_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
309
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_5_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_5_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
310
+ tensor<fp16, [6, 1, 1500, 512]> v_cache2_internal_tensor_assign_5_cast_fp16 = slice_update(begin = concat_32, begin_mask = v_cache2_internal_tensor_assign_5_begin_mask_0, end = concat_33, end_mask = v_cache2_internal_tensor_assign_5_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_5_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_5_stride_0, update = linear_9_cast_fp16, x = coreml_update_state_23)[name = string("v_cache2_internal_tensor_assign_5_cast_fp16")];
311
+ write_state(data = v_cache2_internal_tensor_assign_5_cast_fp16, input = v_cache2)[name = string("coreml_update_state_25_write_state")];
312
+ tensor<fp16, [6, 1, 1500, 512]> coreml_update_state_25 = read_state(input = v_cache2)[name = string("coreml_update_state_25")];
313
+ tensor<fp16, [512, 512]> var_249_to_fp16 = const()[name = string("op_249_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(8002688)))];
314
+ tensor<fp16, [1, ?, 512]> linear_10_cast_fp16 = linear(bias = linear_0_bias_0_to_fp16, weight = var_249_to_fp16, x = audio_data)[name = string("linear_10_cast_fp16")];
315
+ tensor<fp16, [512, 512]> var_253_to_fp16 = const()[name = string("op_253_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(8527040)))];
316
+ tensor<fp16, [512]> var_254_to_fp16 = const()[name = string("op_254_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(9051392)))];
317
+ tensor<fp16, [1, ?, 512]> linear_11_cast_fp16 = linear(bias = var_254_to_fp16, weight = var_253_to_fp16, x = audio_data)[name = string("linear_11_cast_fp16")];
318
+ tensor<int32, [3]> var_256_shape_cast_fp16 = shape(x = linear_10_cast_fp16)[name = string("op_256_shape_cast_fp16")];
319
+ int32 gather_10_axis_0 = const()[name = string("gather_10_axis_0"), val = int32(0)];
320
+ int32 gather_10_batch_dims_0 = const()[name = string("gather_10_batch_dims_0"), val = int32(0)];
321
+ bool gather_10_validate_indices_0 = const()[name = string("gather_10_validate_indices_0"), val = bool(false)];
322
+ string var_256_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_256_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
323
+ uint16 select_10_to_uint16 = const()[name = string("select_10_to_uint16"), val = uint16(1)];
324
+ tensor<uint16, [3]> var_256_shape_cast_fp16_to_uint16 = cast(dtype = var_256_shape_cast_fp16_to_uint16_dtype_0, x = var_256_shape_cast_fp16)[name = string("cast_23")];
325
+ uint16 gather_10_cast_uint16 = gather(axis = gather_10_axis_0, batch_dims = gather_10_batch_dims_0, indices = select_10_to_uint16, validate_indices = gather_10_validate_indices_0, x = var_256_shape_cast_fp16_to_uint16)[name = string("gather_10_cast_uint16")];
326
+ string gather_10_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_10_cast_uint16_to_int32_dtype_0"), val = string("int32")];
327
+ tensor<int32, [1]> expand_dims_51_axes_0 = const()[name = string("expand_dims_51_axes_0"), val = tensor<int32, [1]>([0])];
328
+ int32 gather_10_cast_uint16_to_int32 = cast(dtype = gather_10_cast_uint16_to_int32_dtype_0, x = gather_10_cast_uint16)[name = string("cast_22")];
329
+ tensor<int32, [1]> expand_dims_51 = expand_dims(axes = expand_dims_51_axes_0, x = gather_10_cast_uint16_to_int32)[name = string("expand_dims_51")];
330
+ tensor<int32, [4]> concat_35 = const()[name = string("concat_35"), val = tensor<int32, [4]>([5, 0, 0, 0])];
331
+ tensor<int32, [1]> concat_36_values0_0 = const()[name = string("concat_36_values0_0"), val = tensor<int32, [1]>([0])];
332
+ tensor<int32, [1]> concat_36_values1_0 = const()[name = string("concat_36_values1_0"), val = tensor<int32, [1]>([0])];
333
+ tensor<int32, [1]> concat_36_values3_0 = const()[name = string("concat_36_values3_0"), val = tensor<int32, [1]>([0])];
334
+ int32 concat_36_axis_0 = const()[name = string("concat_36_axis_0"), val = int32(0)];
335
+ bool concat_36_interleave_0 = const()[name = string("concat_36_interleave_0"), val = bool(false)];
336
+ tensor<int32, [4]> concat_36 = concat(axis = concat_36_axis_0, interleave = concat_36_interleave_0, values = (concat_36_values0_0, concat_36_values1_0, expand_dims_51, concat_36_values3_0))[name = string("concat_36")];
337
+ tensor<int32, [4]> k_cache2_internal_tensor_assign_6_stride_0 = const()[name = string("k_cache2_internal_tensor_assign_6_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
338
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_6_begin_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_6_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
339
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_6_end_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_6_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
340
+ tensor<bool, [4]> k_cache2_internal_tensor_assign_6_squeeze_mask_0 = const()[name = string("k_cache2_internal_tensor_assign_6_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
341
+ tensor<fp16, [6, 1, 1500, 512]> k_cache2_internal_tensor_assign_6_cast_fp16 = slice_update(begin = concat_35, begin_mask = k_cache2_internal_tensor_assign_6_begin_mask_0, end = concat_36, end_mask = k_cache2_internal_tensor_assign_6_end_mask_0, squeeze_mask = k_cache2_internal_tensor_assign_6_squeeze_mask_0, stride = k_cache2_internal_tensor_assign_6_stride_0, update = linear_10_cast_fp16, x = coreml_update_state_24)[name = string("k_cache2_internal_tensor_assign_6_cast_fp16")];
342
+ write_state(data = k_cache2_internal_tensor_assign_6_cast_fp16, input = k_cache2)[name = string("coreml_update_state_26_write_state")];
343
+ tensor<int32, [3]> var_261_shape_cast_fp16 = shape(x = linear_11_cast_fp16)[name = string("op_261_shape_cast_fp16")];
344
+ int32 gather_11_axis_0 = const()[name = string("gather_11_axis_0"), val = int32(0)];
345
+ int32 gather_11_batch_dims_0 = const()[name = string("gather_11_batch_dims_0"), val = int32(0)];
346
+ bool gather_11_validate_indices_0 = const()[name = string("gather_11_validate_indices_0"), val = bool(false)];
347
+ string var_261_shape_cast_fp16_to_uint16_dtype_0 = const()[name = string("op_261_shape_cast_fp16_to_uint16_dtype_0"), val = string("uint16")];
348
+ uint16 select_11_to_uint16 = const()[name = string("select_11_to_uint16"), val = uint16(1)];
349
+ tensor<uint16, [3]> var_261_shape_cast_fp16_to_uint16 = cast(dtype = var_261_shape_cast_fp16_to_uint16_dtype_0, x = var_261_shape_cast_fp16)[name = string("cast_21")];
350
+ uint16 gather_11_cast_uint16 = gather(axis = gather_11_axis_0, batch_dims = gather_11_batch_dims_0, indices = select_11_to_uint16, validate_indices = gather_11_validate_indices_0, x = var_261_shape_cast_fp16_to_uint16)[name = string("gather_11_cast_uint16")];
351
+ string gather_11_cast_uint16_to_int32_dtype_0 = const()[name = string("gather_11_cast_uint16_to_int32_dtype_0"), val = string("int32")];
352
+ tensor<int32, [1]> expand_dims_55_axes_0 = const()[name = string("expand_dims_55_axes_0"), val = tensor<int32, [1]>([0])];
353
+ int32 gather_11_cast_uint16_to_int32 = cast(dtype = gather_11_cast_uint16_to_int32_dtype_0, x = gather_11_cast_uint16)[name = string("cast_20")];
354
+ tensor<int32, [1]> expand_dims_55 = expand_dims(axes = expand_dims_55_axes_0, x = gather_11_cast_uint16_to_int32)[name = string("expand_dims_55")];
355
+ tensor<int32, [4]> concat_38 = const()[name = string("concat_38"), val = tensor<int32, [4]>([5, 0, 0, 0])];
356
+ tensor<int32, [1]> concat_39_values0_0 = const()[name = string("concat_39_values0_0"), val = tensor<int32, [1]>([0])];
357
+ tensor<int32, [1]> concat_39_values1_0 = const()[name = string("concat_39_values1_0"), val = tensor<int32, [1]>([0])];
358
+ tensor<int32, [1]> concat_39_values3_0 = const()[name = string("concat_39_values3_0"), val = tensor<int32, [1]>([0])];
359
+ int32 concat_39_axis_0 = const()[name = string("concat_39_axis_0"), val = int32(0)];
360
+ bool concat_39_interleave_0 = const()[name = string("concat_39_interleave_0"), val = bool(false)];
361
+ tensor<int32, [4]> concat_39 = concat(axis = concat_39_axis_0, interleave = concat_39_interleave_0, values = (concat_39_values0_0, concat_39_values1_0, expand_dims_55, concat_39_values3_0))[name = string("concat_39")];
362
+ tensor<int32, [4]> v_cache2_internal_tensor_assign_6_stride_0 = const()[name = string("v_cache2_internal_tensor_assign_6_stride_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
363
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_6_begin_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_6_begin_mask_0"), val = tensor<bool, [4]>([false, false, false, false])];
364
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_6_end_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_6_end_mask_0"), val = tensor<bool, [4]>([false, true, false, true])];
365
+ tensor<bool, [4]> v_cache2_internal_tensor_assign_6_squeeze_mask_0 = const()[name = string("v_cache2_internal_tensor_assign_6_squeeze_mask_0"), val = tensor<bool, [4]>([true, false, false, false])];
366
+ tensor<fp16, [6, 1, 1500, 512]> v_cache2_internal_tensor_assign_6_cast_fp16 = slice_update(begin = concat_38, begin_mask = v_cache2_internal_tensor_assign_6_begin_mask_0, end = concat_39, end_mask = v_cache2_internal_tensor_assign_6_end_mask_0, squeeze_mask = v_cache2_internal_tensor_assign_6_squeeze_mask_0, stride = v_cache2_internal_tensor_assign_6_stride_0, update = linear_11_cast_fp16, x = coreml_update_state_25)[name = string("v_cache2_internal_tensor_assign_6_cast_fp16")];
367
+ write_state(data = v_cache2_internal_tensor_assign_6_cast_fp16, input = v_cache2)[name = string("coreml_update_state_27_write_state")];
368
+ } -> (dummy);
369
+ }
base/decoder_first.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fdbcff86cdfe9e0b8842ad4bc1af8ebbf22082b1d0342a8304023f63dd3663f
3
+ size 9052480
base/decoder_second.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68a3b3426587d83e56d3286cc0b733c9b8a5bff6b1ad6f9e1789a3cb55164455
3
+ size 243
base/decoder_second.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6c0272d581c200e0ab4f29c687e4a7b49152e241cea335fa6faa6a430a460b6
3
+ size 487
base/decoder_second.mlmodelc/metadata.json ADDED
@@ -0,0 +1,127 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "logits",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 9,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios18.linear" : 49,
23
+ "Ios18.readState" : 14,
24
+ "Ios18.expandDims" : 7,
25
+ "Ios18.sub" : 1,
26
+ "Ios18.matmul" : 24,
27
+ "Ios18.gelu" : 6,
28
+ "Ios18.gather" : 9,
29
+ "Ios18.concat" : 32,
30
+ "Shape" : 8,
31
+ "Ios18.add" : 31,
32
+ "Ios18.sliceUpdate" : 24,
33
+ "Ios18.sliceByIndex" : 49,
34
+ "Ios18.layerNorm" : 19,
35
+ "Ios18.cast" : 16,
36
+ "Ios18.transpose" : 48,
37
+ "Ios18.writeState" : 12,
38
+ "Ios18.reshape" : 48,
39
+ "Ios18.softmax" : 12,
40
+ "Ios18.mul" : 24
41
+ },
42
+ "computePrecision" : "Mixed (Float16, Int16, Int32, UInt16)",
43
+ "isUpdatable" : "0",
44
+ "stateSchema" : [
45
+ {
46
+ "dataType" : "Float16",
47
+ "isOptional" : "0",
48
+ "formattedType" : "State (Float16 6 × 1 × 448 × 512)",
49
+ "shortDescription" : "",
50
+ "shape" : "[6, 1, 448, 512]",
51
+ "name" : "k_cache1",
52
+ "type" : "State"
53
+ },
54
+ {
55
+ "dataType" : "Float16",
56
+ "isOptional" : "0",
57
+ "formattedType" : "State (Float16 6 × 1 × 448 × 512)",
58
+ "shortDescription" : "",
59
+ "shape" : "[6, 1, 448, 512]",
60
+ "name" : "v_cache1",
61
+ "type" : "State"
62
+ },
63
+ {
64
+ "dataType" : "Float16",
65
+ "isOptional" : "0",
66
+ "formattedType" : "State (Float16 6 × 1 × 1500 × 512)",
67
+ "shortDescription" : "",
68
+ "shape" : "[6, 1, 1500, 512]",
69
+ "name" : "k_cache2",
70
+ "type" : "State"
71
+ },
72
+ {
73
+ "dataType" : "Float16",
74
+ "isOptional" : "0",
75
+ "formattedType" : "State (Float16 6 × 1 × 1500 × 512)",
76
+ "shortDescription" : "",
77
+ "shape" : "[6, 1, 1500, 512]",
78
+ "name" : "v_cache2",
79
+ "type" : "State"
80
+ }
81
+ ],
82
+ "availability" : {
83
+ "macOS" : "15.0",
84
+ "tvOS" : "18.0",
85
+ "visionOS" : "2.0",
86
+ "watchOS" : "11.0",
87
+ "iOS" : "18.0",
88
+ "macCatalyst" : "18.0"
89
+ },
90
+ "modelType" : {
91
+ "name" : "MLModelType_mlProgram"
92
+ },
93
+ "userDefinedMetadata" : {
94
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
95
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
96
+ "com.github.apple.coremltools.version" : "8.0"
97
+ },
98
+ "inputSchema" : [
99
+ {
100
+ "dataType" : "Int32",
101
+ "hasShapeFlexibility" : "1",
102
+ "isOptional" : "0",
103
+ "shapeFlexibility" : "1 × 1...448",
104
+ "shapeRange" : "[[1, 1], [1, 448]]",
105
+ "formattedType" : "MultiArray (Int32 1 × 1)",
106
+ "type" : "MultiArray",
107
+ "shape" : "[1, 1]",
108
+ "name" : "token_data",
109
+ "shortDescription" : ""
110
+ },
111
+ {
112
+ "dataType" : "Float16",
113
+ "hasShapeFlexibility" : "1",
114
+ "isOptional" : "0",
115
+ "shapeFlexibility" : "1 × 1...448",
116
+ "shapeRange" : "[[1, 1], [1, 448]]",
117
+ "formattedType" : "MultiArray (Float16 1 × 1)",
118
+ "type" : "MultiArray",
119
+ "shape" : "[1, 1]",
120
+ "name" : "offset_mask",
121
+ "shortDescription" : ""
122
+ }
123
+ ],
124
+ "generatedClassName" : "decoder_second",
125
+ "method" : "predict"
126
+ }
127
+ ]
base/decoder_second.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
base/decoder_second.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94338bcd9475d6d8848699ee40dd6fac40d1e597c1e28d124454a7bf37bff672
3
+ size 99759858
base/encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:405dc318443c493222a32916d66c5d908d7cc1d250f73e9a192d5b734a8494ed
3
+ size 243
base/encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b04d5884b5a2d983f52a0e557aff2e7d3dff78b2a9f9d496a5280546bacfaff
3
+ size 318
base/encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,69 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 1500 × 512)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1500, 512]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 9,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios18.mul" : 12,
23
+ "Ios18.softmax" : 6,
24
+ "Ios18.linear" : 36,
25
+ "Ios18.gelu" : 8,
26
+ "Ios18.layerNorm" : 13,
27
+ "Ios18.transpose" : 25,
28
+ "Ios18.matmul" : 12,
29
+ "Ios18.conv" : 2,
30
+ "Ios18.add" : 13,
31
+ "Ios18.reshape" : 24
32
+ },
33
+ "computePrecision" : "Mixed (Float16, Int32)",
34
+ "isUpdatable" : "0",
35
+ "stateSchema" : [
36
+
37
+ ],
38
+ "availability" : {
39
+ "macOS" : "15.0",
40
+ "tvOS" : "18.0",
41
+ "visionOS" : "2.0",
42
+ "watchOS" : "11.0",
43
+ "iOS" : "18.0",
44
+ "macCatalyst" : "18.0"
45
+ },
46
+ "modelType" : {
47
+ "name" : "MLModelType_mlProgram"
48
+ },
49
+ "userDefinedMetadata" : {
50
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
51
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
52
+ "com.github.apple.coremltools.version" : "8.0"
53
+ },
54
+ "inputSchema" : [
55
+ {
56
+ "hasShapeFlexibility" : "0",
57
+ "isOptional" : "0",
58
+ "dataType" : "Float16",
59
+ "formattedType" : "MultiArray (Float16 1 × 80 × 3000)",
60
+ "shortDescription" : "",
61
+ "shape" : "[1, 80, 3000]",
62
+ "name" : "logmel_data",
63
+ "type" : "MultiArray"
64
+ }
65
+ ],
66
+ "generatedClassName" : "encoder",
67
+ "method" : "predict"
68
+ }
69
+ ]
base/encoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,384 @@
1
+ program(1.3)
2
+ [buildInfo = dict<string, string>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
3
+ {
4
+ func main<ios18>(tensor<fp16, [1, 80, 3000]> logmel_data) {
5
+ string var_32_pad_type_0 = const()[name = string("op_32_pad_type_0"), val = string("custom")];
6
+ tensor<int32, [2]> var_32_pad_0 = const()[name = string("op_32_pad_0"), val = tensor<int32, [2]>([1, 1])];
7
+ tensor<int32, [1]> var_32_strides_0 = const()[name = string("op_32_strides_0"), val = tensor<int32, [1]>([1])];
8
+ tensor<int32, [1]> var_32_dilations_0 = const()[name = string("op_32_dilations_0"), val = tensor<int32, [1]>([1])];
9
+ int32 var_32_groups_0 = const()[name = string("op_32_groups_0"), val = int32(1)];
10
+ tensor<fp16, [512, 80, 3]> weight_3_to_fp16 = const()[name = string("weight_3_to_fp16"), val = tensor<fp16, [512, 80, 3]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
11
+ tensor<fp16, [512]> bias_3_to_fp16 = const()[name = string("bias_3_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(245888)))];
12
+ tensor<fp16, [1, 512, 3000]> var_32_cast_fp16 = conv(bias = bias_3_to_fp16, dilations = var_32_dilations_0, groups = var_32_groups_0, pad = var_32_pad_0, pad_type = var_32_pad_type_0, strides = var_32_strides_0, weight = weight_3_to_fp16, x = logmel_data)[name = string("op_32_cast_fp16")];
13
+ string input_1_mode_0 = const()[name = string("input_1_mode_0"), val = string("EXACT")];
14
+ tensor<fp16, [1, 512, 3000]> input_1_cast_fp16 = gelu(mode = input_1_mode_0, x = var_32_cast_fp16)[name = string("input_1_cast_fp16")];
15
+ string var_50_pad_type_0 = const()[name = string("op_50_pad_type_0"), val = string("custom")];
16
+ tensor<int32, [2]> var_50_pad_0 = const()[name = string("op_50_pad_0"), val = tensor<int32, [2]>([1, 1])];
17
+ tensor<int32, [1]> var_50_strides_0 = const()[name = string("op_50_strides_0"), val = tensor<int32, [1]>([2])];
18
+ tensor<int32, [1]> var_50_dilations_0 = const()[name = string("op_50_dilations_0"), val = tensor<int32, [1]>([1])];
19
+ int32 var_50_groups_0 = const()[name = string("op_50_groups_0"), val = int32(1)];
20
+ tensor<fp16, [512, 512, 3]> weight_7_to_fp16 = const()[name = string("weight_7_to_fp16"), val = tensor<fp16, [512, 512, 3]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(246976)))];
21
+ tensor<fp16, [512]> bias_7_to_fp16 = const()[name = string("bias_7_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(1819904)))];
22
+ tensor<fp16, [1, 512, 1500]> var_50_cast_fp16 = conv(bias = bias_7_to_fp16, dilations = var_50_dilations_0, groups = var_50_groups_0, pad = var_50_pad_0, pad_type = var_50_pad_type_0, strides = var_50_strides_0, weight = weight_7_to_fp16, x = input_1_cast_fp16)[name = string("op_50_cast_fp16")];
23
+ string x_3_mode_0 = const()[name = string("x_3_mode_0"), val = string("EXACT")];
24
+ tensor<fp16, [1, 512, 1500]> x_3_cast_fp16 = gelu(mode = x_3_mode_0, x = var_50_cast_fp16)[name = string("x_3_cast_fp16")];
25
+ tensor<int32, [3]> var_56 = const()[name = string("op_56"), val = tensor<int32, [3]>([0, 2, 1])];
26
+ tensor<fp16, [1500, 512]> positional_embedding_to_fp16 = const()[name = string("positional_embedding_to_fp16"), val = tensor<fp16, [1500, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(1820992)))];
27
+ tensor<fp16, [1, 1500, 512]> x_5_cast_fp16 = transpose(perm = var_56, x = x_3_cast_fp16)[name = string("transpose_60")];
28
+ tensor<fp16, [1, 1500, 512]> var_59_cast_fp16 = add(x = x_5_cast_fp16, y = positional_embedding_to_fp16)[name = string("op_59_cast_fp16")];
29
+ int32 var_72 = const()[name = string("op_72"), val = int32(-1)];
30
+ tensor<int32, [1]> var_88_axes_0 = const()[name = string("op_88_axes_0"), val = tensor<int32, [1]>([-1])];
31
+ tensor<fp16, [512]> blocks_0_attn_ln_weight_to_fp16 = const()[name = string("blocks_0_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3357056)))];
32
+ tensor<fp16, [512]> blocks_0_attn_ln_bias_to_fp16 = const()[name = string("blocks_0_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3358144)))];
33
+ fp16 var_78_to_fp16 = const()[name = string("op_78_to_fp16"), val = fp16(0x1.5p-17)];
34
+ tensor<fp16, [1, 1500, 512]> var_88_cast_fp16 = layer_norm(axes = var_88_axes_0, beta = blocks_0_attn_ln_bias_to_fp16, epsilon = var_78_to_fp16, gamma = blocks_0_attn_ln_weight_to_fp16, x = var_59_cast_fp16)[name = string("op_88_cast_fp16")];
35
+ tensor<fp16, [512, 512]> var_99_to_fp16 = const()[name = string("op_99_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3359232)))];
36
+ tensor<fp16, [512]> var_100_to_fp16 = const()[name = string("op_100_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3883584)))];
37
+ tensor<fp16, [1, 1500, 512]> linear_0_cast_fp16 = linear(bias = var_100_to_fp16, weight = var_99_to_fp16, x = var_88_cast_fp16)[name = string("linear_0_cast_fp16")];
38
+ tensor<fp16, [512, 512]> var_103_to_fp16 = const()[name = string("op_103_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3884672)))];
39
+ tensor<fp16, [512]> linear_1_bias_0_to_fp16 = const()[name = string("linear_1_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(4409024)))];
40
+ tensor<fp16, [1, 1500, 512]> linear_1_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_103_to_fp16, x = var_88_cast_fp16)[name = string("linear_1_cast_fp16")];
41
+ tensor<fp16, [512, 512]> var_107_to_fp16 = const()[name = string("op_107_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(4410112)))];
42
+ tensor<fp16, [512]> var_108_to_fp16 = const()[name = string("op_108_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(4934464)))];
43
+ tensor<fp16, [1, 1500, 512]> linear_2_cast_fp16 = linear(bias = var_108_to_fp16, weight = var_107_to_fp16, x = var_88_cast_fp16)[name = string("linear_2_cast_fp16")];
44
+ tensor<int32, [4]> var_116 = const()[name = string("op_116"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
45
+ tensor<fp16, [1, 1500, 8, 64]> var_117_cast_fp16 = reshape(shape = var_116, x = linear_0_cast_fp16)[name = string("op_117_cast_fp16")];
46
+ tensor<fp16, [1, 1, 1, 1]> const_42_to_fp16 = const()[name = string("const_42_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
47
+ tensor<fp16, [1, 1500, 8, 64]> q_3_cast_fp16 = mul(x = var_117_cast_fp16, y = const_42_to_fp16)[name = string("q_3_cast_fp16")];
48
+ tensor<int32, [4]> var_123 = const()[name = string("op_123"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
49
+ tensor<fp16, [1, 1500, 8, 64]> var_124_cast_fp16 = reshape(shape = var_123, x = linear_1_cast_fp16)[name = string("op_124_cast_fp16")];
50
+ tensor<fp16, [1, 1, 1, 1]> const_43_to_fp16 = const()[name = string("const_43_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
51
+ tensor<fp16, [1, 1500, 8, 64]> k_3_cast_fp16 = mul(x = var_124_cast_fp16, y = const_43_to_fp16)[name = string("k_3_cast_fp16")];
52
+ tensor<int32, [4]> var_130 = const()[name = string("op_130"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
53
+ tensor<fp16, [1, 1500, 8, 64]> var_131_cast_fp16 = reshape(shape = var_130, x = linear_2_cast_fp16)[name = string("op_131_cast_fp16")];
54
+ tensor<int32, [4]> var_132 = const()[name = string("op_132"), val = tensor<int32, [4]>([0, 2, 1, 3])];
55
+ bool qk_1_transpose_x_0 = const()[name = string("qk_1_transpose_x_0"), val = bool(false)];
56
+ bool qk_1_transpose_y_0 = const()[name = string("qk_1_transpose_y_0"), val = bool(false)];
57
+ tensor<int32, [4]> transpose_24_perm_0 = const()[name = string("transpose_24_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
58
+ tensor<int32, [4]> transpose_25_perm_0 = const()[name = string("transpose_25_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
59
+ tensor<fp16, [1, 8, 64, 1500]> transpose_25 = transpose(perm = transpose_25_perm_0, x = k_3_cast_fp16)[name = string("transpose_57")];
60
+ tensor<fp16, [1, 8, 1500, 64]> transpose_24 = transpose(perm = transpose_24_perm_0, x = q_3_cast_fp16)[name = string("transpose_58")];
61
+ tensor<fp16, [1, 8, 1500, 1500]> qk_1_cast_fp16 = matmul(transpose_x = qk_1_transpose_x_0, transpose_y = qk_1_transpose_y_0, x = transpose_24, y = transpose_25)[name = string("qk_1_cast_fp16")];
62
+ tensor<fp16, [1, 8, 1500, 1500]> var_136_cast_fp16 = softmax(axis = var_72, x = qk_1_cast_fp16)[name = string("op_136_cast_fp16")];
63
+ bool var_138_transpose_x_0 = const()[name = string("op_138_transpose_x_0"), val = bool(false)];
64
+ bool var_138_transpose_y_0 = const()[name = string("op_138_transpose_y_0"), val = bool(false)];
65
+ tensor<fp16, [1, 8, 1500, 64]> v_3_cast_fp16 = transpose(perm = var_132, x = var_131_cast_fp16)[name = string("transpose_59")];
66
+ tensor<fp16, [1, 8, 1500, 64]> var_138_cast_fp16 = matmul(transpose_x = var_138_transpose_x_0, transpose_y = var_138_transpose_y_0, x = var_136_cast_fp16, y = v_3_cast_fp16)[name = string("op_138_cast_fp16")];
67
+ tensor<int32, [4]> var_139 = const()[name = string("op_139"), val = tensor<int32, [4]>([0, 2, 1, 3])];
68
+ tensor<int32, [3]> concat_0 = const()[name = string("concat_0"), val = tensor<int32, [3]>([1, 1500, 512])];
69
+ tensor<fp16, [1, 1500, 8, 64]> var_140_cast_fp16 = transpose(perm = var_139, x = var_138_cast_fp16)[name = string("transpose_56")];
70
+ tensor<fp16, [1, 1500, 512]> x_11_cast_fp16 = reshape(shape = concat_0, x = var_140_cast_fp16)[name = string("x_11_cast_fp16")];
71
+ tensor<fp16, [512, 512]> var_144_to_fp16 = const()[name = string("op_144_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(4935552)))];
72
+ tensor<fp16, [512]> var_145_to_fp16 = const()[name = string("op_145_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(5459904)))];
73
+ tensor<fp16, [1, 1500, 512]> linear_3_cast_fp16 = linear(bias = var_145_to_fp16, weight = var_144_to_fp16, x = x_11_cast_fp16)[name = string("linear_3_cast_fp16")];
74
+ tensor<fp16, [1, 1500, 512]> x_13_cast_fp16 = add(x = var_59_cast_fp16, y = linear_3_cast_fp16)[name = string("x_13_cast_fp16")];
75
+ tensor<int32, [1]> var_152_axes_0 = const()[name = string("op_152_axes_0"), val = tensor<int32, [1]>([-1])];
76
+ tensor<fp16, [512]> blocks_0_mlp_ln_weight_to_fp16 = const()[name = string("blocks_0_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(5460992)))];
77
+ tensor<fp16, [512]> blocks_0_mlp_ln_bias_to_fp16 = const()[name = string("blocks_0_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(5462080)))];
78
+ tensor<fp16, [1, 1500, 512]> var_152_cast_fp16 = layer_norm(axes = var_152_axes_0, beta = blocks_0_mlp_ln_bias_to_fp16, epsilon = var_78_to_fp16, gamma = blocks_0_mlp_ln_weight_to_fp16, x = x_13_cast_fp16)[name = string("op_152_cast_fp16")];
79
+ tensor<fp16, [2048, 512]> var_161_to_fp16 = const()[name = string("op_161_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(5463168)))];
80
+ tensor<fp16, [2048]> var_162_to_fp16 = const()[name = string("op_162_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(7560384)))];
81
+ tensor<fp16, [1, 1500, 2048]> linear_4_cast_fp16 = linear(bias = var_162_to_fp16, weight = var_161_to_fp16, x = var_152_cast_fp16)[name = string("linear_4_cast_fp16")];
82
+ string x_17_mode_0 = const()[name = string("x_17_mode_0"), val = string("EXACT")];
83
+ tensor<fp16, [1, 1500, 2048]> x_17_cast_fp16 = gelu(mode = x_17_mode_0, x = linear_4_cast_fp16)[name = string("x_17_cast_fp16")];
84
+ tensor<fp16, [512, 2048]> var_167_to_fp16 = const()[name = string("op_167_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(7564544)))];
85
+ tensor<fp16, [512]> var_168_to_fp16 = const()[name = string("op_168_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(9661760)))];
86
+ tensor<fp16, [1, 1500, 512]> linear_5_cast_fp16 = linear(bias = var_168_to_fp16, weight = var_167_to_fp16, x = x_17_cast_fp16)[name = string("linear_5_cast_fp16")];
87
+ tensor<fp16, [1, 1500, 512]> x_19_cast_fp16 = add(x = x_13_cast_fp16, y = linear_5_cast_fp16)[name = string("x_19_cast_fp16")];
88
+ int32 var_178 = const()[name = string("op_178"), val = int32(-1)];
89
+ tensor<int32, [1]> var_194_axes_0 = const()[name = string("op_194_axes_0"), val = tensor<int32, [1]>([-1])];
90
+ tensor<fp16, [512]> blocks_1_attn_ln_weight_to_fp16 = const()[name = string("blocks_1_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(9662848)))];
91
+ tensor<fp16, [512]> blocks_1_attn_ln_bias_to_fp16 = const()[name = string("blocks_1_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(9663936)))];
92
+ fp16 var_184_to_fp16 = const()[name = string("op_184_to_fp16"), val = fp16(0x1.5p-17)];
93
+ tensor<fp16, [1, 1500, 512]> var_194_cast_fp16 = layer_norm(axes = var_194_axes_0, beta = blocks_1_attn_ln_bias_to_fp16, epsilon = var_184_to_fp16, gamma = blocks_1_attn_ln_weight_to_fp16, x = x_19_cast_fp16)[name = string("op_194_cast_fp16")];
94
+ tensor<fp16, [512, 512]> var_205_to_fp16 = const()[name = string("op_205_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(9665024)))];
95
+ tensor<fp16, [512]> var_206_to_fp16 = const()[name = string("op_206_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(10189376)))];
96
+ tensor<fp16, [1, 1500, 512]> linear_6_cast_fp16 = linear(bias = var_206_to_fp16, weight = var_205_to_fp16, x = var_194_cast_fp16)[name = string("linear_6_cast_fp16")];
97
+ tensor<fp16, [512, 512]> var_209_to_fp16 = const()[name = string("op_209_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(10190464)))];
98
+ tensor<fp16, [1, 1500, 512]> linear_7_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_209_to_fp16, x = var_194_cast_fp16)[name = string("linear_7_cast_fp16")];
99
+ tensor<fp16, [512, 512]> var_213_to_fp16 = const()[name = string("op_213_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(10714816)))];
100
+ tensor<fp16, [512]> var_214_to_fp16 = const()[name = string("op_214_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(11239168)))];
101
+ tensor<fp16, [1, 1500, 512]> linear_8_cast_fp16 = linear(bias = var_214_to_fp16, weight = var_213_to_fp16, x = var_194_cast_fp16)[name = string("linear_8_cast_fp16")];
102
+ tensor<int32, [4]> var_222 = const()[name = string("op_222"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
103
+ tensor<fp16, [1, 1500, 8, 64]> var_223_cast_fp16 = reshape(shape = var_222, x = linear_6_cast_fp16)[name = string("op_223_cast_fp16")];
104
+ tensor<fp16, [1, 1, 1, 1]> const_44_to_fp16 = const()[name = string("const_44_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
105
+ tensor<fp16, [1, 1500, 8, 64]> q_7_cast_fp16 = mul(x = var_223_cast_fp16, y = const_44_to_fp16)[name = string("q_7_cast_fp16")];
106
+ tensor<int32, [4]> var_229 = const()[name = string("op_229"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
107
+ tensor<fp16, [1, 1500, 8, 64]> var_230_cast_fp16 = reshape(shape = var_229, x = linear_7_cast_fp16)[name = string("op_230_cast_fp16")];
108
+ tensor<fp16, [1, 1, 1, 1]> const_45_to_fp16 = const()[name = string("const_45_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
109
+ tensor<fp16, [1, 1500, 8, 64]> k_7_cast_fp16 = mul(x = var_230_cast_fp16, y = const_45_to_fp16)[name = string("k_7_cast_fp16")];
110
+ tensor<int32, [4]> var_236 = const()[name = string("op_236"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
111
+ tensor<fp16, [1, 1500, 8, 64]> var_237_cast_fp16 = reshape(shape = var_236, x = linear_8_cast_fp16)[name = string("op_237_cast_fp16")];
112
+ tensor<int32, [4]> var_238 = const()[name = string("op_238"), val = tensor<int32, [4]>([0, 2, 1, 3])];
113
+ bool qk_3_transpose_x_0 = const()[name = string("qk_3_transpose_x_0"), val = bool(false)];
114
+ bool qk_3_transpose_y_0 = const()[name = string("qk_3_transpose_y_0"), val = bool(false)];
115
+ tensor<int32, [4]> transpose_26_perm_0 = const()[name = string("transpose_26_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
116
+ tensor<int32, [4]> transpose_27_perm_0 = const()[name = string("transpose_27_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
117
+ tensor<fp16, [1, 8, 64, 1500]> transpose_27 = transpose(perm = transpose_27_perm_0, x = k_7_cast_fp16)[name = string("transpose_53")];
118
+ tensor<fp16, [1, 8, 1500, 64]> transpose_26 = transpose(perm = transpose_26_perm_0, x = q_7_cast_fp16)[name = string("transpose_54")];
119
+ tensor<fp16, [1, 8, 1500, 1500]> qk_3_cast_fp16 = matmul(transpose_x = qk_3_transpose_x_0, transpose_y = qk_3_transpose_y_0, x = transpose_26, y = transpose_27)[name = string("qk_3_cast_fp16")];
120
+ tensor<fp16, [1, 8, 1500, 1500]> var_242_cast_fp16 = softmax(axis = var_178, x = qk_3_cast_fp16)[name = string("op_242_cast_fp16")];
121
+ bool var_244_transpose_x_0 = const()[name = string("op_244_transpose_x_0"), val = bool(false)];
122
+ bool var_244_transpose_y_0 = const()[name = string("op_244_transpose_y_0"), val = bool(false)];
123
+ tensor<fp16, [1, 8, 1500, 64]> v_7_cast_fp16 = transpose(perm = var_238, x = var_237_cast_fp16)[name = string("transpose_55")];
124
+ tensor<fp16, [1, 8, 1500, 64]> var_244_cast_fp16 = matmul(transpose_x = var_244_transpose_x_0, transpose_y = var_244_transpose_y_0, x = var_242_cast_fp16, y = v_7_cast_fp16)[name = string("op_244_cast_fp16")];
125
+ tensor<int32, [4]> var_245 = const()[name = string("op_245"), val = tensor<int32, [4]>([0, 2, 1, 3])];
126
+ tensor<int32, [3]> concat_1 = const()[name = string("concat_1"), val = tensor<int32, [3]>([1, 1500, 512])];
127
+ tensor<fp16, [1, 1500, 8, 64]> var_246_cast_fp16 = transpose(perm = var_245, x = var_244_cast_fp16)[name = string("transpose_52")];
128
+ tensor<fp16, [1, 1500, 512]> x_23_cast_fp16 = reshape(shape = concat_1, x = var_246_cast_fp16)[name = string("x_23_cast_fp16")];
129
+ tensor<fp16, [512, 512]> var_250_to_fp16 = const()[name = string("op_250_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(11240256)))];
130
+ tensor<fp16, [512]> var_251_to_fp16 = const()[name = string("op_251_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(11764608)))];
131
+ tensor<fp16, [1, 1500, 512]> linear_9_cast_fp16 = linear(bias = var_251_to_fp16, weight = var_250_to_fp16, x = x_23_cast_fp16)[name = string("linear_9_cast_fp16")];
132
+ tensor<fp16, [1, 1500, 512]> x_25_cast_fp16 = add(x = x_19_cast_fp16, y = linear_9_cast_fp16)[name = string("x_25_cast_fp16")];
133
+ tensor<int32, [1]> var_258_axes_0 = const()[name = string("op_258_axes_0"), val = tensor<int32, [1]>([-1])];
134
+ tensor<fp16, [512]> blocks_1_mlp_ln_weight_to_fp16 = const()[name = string("blocks_1_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(11765696)))];
135
+ tensor<fp16, [512]> blocks_1_mlp_ln_bias_to_fp16 = const()[name = string("blocks_1_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(11766784)))];
136
+ tensor<fp16, [1, 1500, 512]> var_258_cast_fp16 = layer_norm(axes = var_258_axes_0, beta = blocks_1_mlp_ln_bias_to_fp16, epsilon = var_184_to_fp16, gamma = blocks_1_mlp_ln_weight_to_fp16, x = x_25_cast_fp16)[name = string("op_258_cast_fp16")];
137
+ tensor<fp16, [2048, 512]> var_267_to_fp16 = const()[name = string("op_267_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(11767872)))];
138
+ tensor<fp16, [2048]> var_268_to_fp16 = const()[name = string("op_268_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(13865088)))];
139
+ tensor<fp16, [1, 1500, 2048]> linear_10_cast_fp16 = linear(bias = var_268_to_fp16, weight = var_267_to_fp16, x = var_258_cast_fp16)[name = string("linear_10_cast_fp16")];
140
+ string x_29_mode_0 = const()[name = string("x_29_mode_0"), val = string("EXACT")];
141
+ tensor<fp16, [1, 1500, 2048]> x_29_cast_fp16 = gelu(mode = x_29_mode_0, x = linear_10_cast_fp16)[name = string("x_29_cast_fp16")];
142
+ tensor<fp16, [512, 2048]> var_273_to_fp16 = const()[name = string("op_273_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(13869248)))];
143
+ tensor<fp16, [512]> var_274_to_fp16 = const()[name = string("op_274_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(15966464)))];
144
+ tensor<fp16, [1, 1500, 512]> linear_11_cast_fp16 = linear(bias = var_274_to_fp16, weight = var_273_to_fp16, x = x_29_cast_fp16)[name = string("linear_11_cast_fp16")];
145
+ tensor<fp16, [1, 1500, 512]> x_31_cast_fp16 = add(x = x_25_cast_fp16, y = linear_11_cast_fp16)[name = string("x_31_cast_fp16")];
146
+ int32 var_284 = const()[name = string("op_284"), val = int32(-1)];
147
+ tensor<int32, [1]> var_300_axes_0 = const()[name = string("op_300_axes_0"), val = tensor<int32, [1]>([-1])];
148
+ tensor<fp16, [512]> blocks_2_attn_ln_weight_to_fp16 = const()[name = string("blocks_2_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(15967552)))];
149
+ tensor<fp16, [512]> blocks_2_attn_ln_bias_to_fp16 = const()[name = string("blocks_2_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(15968640)))];
150
+ fp16 var_290_to_fp16 = const()[name = string("op_290_to_fp16"), val = fp16(0x1.5p-17)];
151
+ tensor<fp16, [1, 1500, 512]> var_300_cast_fp16 = layer_norm(axes = var_300_axes_0, beta = blocks_2_attn_ln_bias_to_fp16, epsilon = var_290_to_fp16, gamma = blocks_2_attn_ln_weight_to_fp16, x = x_31_cast_fp16)[name = string("op_300_cast_fp16")];
152
+ tensor<fp16, [512, 512]> var_311_to_fp16 = const()[name = string("op_311_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(15969728)))];
153
+ tensor<fp16, [512]> var_312_to_fp16 = const()[name = string("op_312_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(16494080)))];
154
+ tensor<fp16, [1, 1500, 512]> linear_12_cast_fp16 = linear(bias = var_312_to_fp16, weight = var_311_to_fp16, x = var_300_cast_fp16)[name = string("linear_12_cast_fp16")];
155
+ tensor<fp16, [512, 512]> var_315_to_fp16 = const()[name = string("op_315_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(16495168)))];
156
+ tensor<fp16, [1, 1500, 512]> linear_13_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_315_to_fp16, x = var_300_cast_fp16)[name = string("linear_13_cast_fp16")];
157
+ tensor<fp16, [512, 512]> var_319_to_fp16 = const()[name = string("op_319_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(17019520)))];
158
+ tensor<fp16, [512]> var_320_to_fp16 = const()[name = string("op_320_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(17543872)))];
159
+ tensor<fp16, [1, 1500, 512]> linear_14_cast_fp16 = linear(bias = var_320_to_fp16, weight = var_319_to_fp16, x = var_300_cast_fp16)[name = string("linear_14_cast_fp16")];
160
+ tensor<int32, [4]> var_328 = const()[name = string("op_328"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
161
+ tensor<fp16, [1, 1500, 8, 64]> var_329_cast_fp16 = reshape(shape = var_328, x = linear_12_cast_fp16)[name = string("op_329_cast_fp16")];
162
+ tensor<fp16, [1, 1, 1, 1]> const_46_to_fp16 = const()[name = string("const_46_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
163
+ tensor<fp16, [1, 1500, 8, 64]> q_11_cast_fp16 = mul(x = var_329_cast_fp16, y = const_46_to_fp16)[name = string("q_11_cast_fp16")];
164
+ tensor<int32, [4]> var_335 = const()[name = string("op_335"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
165
+ tensor<fp16, [1, 1500, 8, 64]> var_336_cast_fp16 = reshape(shape = var_335, x = linear_13_cast_fp16)[name = string("op_336_cast_fp16")];
166
+ tensor<fp16, [1, 1, 1, 1]> const_47_to_fp16 = const()[name = string("const_47_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
167
+ tensor<fp16, [1, 1500, 8, 64]> k_11_cast_fp16 = mul(x = var_336_cast_fp16, y = const_47_to_fp16)[name = string("k_11_cast_fp16")];
168
+ tensor<int32, [4]> var_342 = const()[name = string("op_342"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
169
+ tensor<fp16, [1, 1500, 8, 64]> var_343_cast_fp16 = reshape(shape = var_342, x = linear_14_cast_fp16)[name = string("op_343_cast_fp16")];
170
+ tensor<int32, [4]> var_344 = const()[name = string("op_344"), val = tensor<int32, [4]>([0, 2, 1, 3])];
171
+ bool qk_5_transpose_x_0 = const()[name = string("qk_5_transpose_x_0"), val = bool(false)];
172
+ bool qk_5_transpose_y_0 = const()[name = string("qk_5_transpose_y_0"), val = bool(false)];
173
+ tensor<int32, [4]> transpose_28_perm_0 = const()[name = string("transpose_28_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
174
+ tensor<int32, [4]> transpose_29_perm_0 = const()[name = string("transpose_29_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
175
+ tensor<fp16, [1, 8, 64, 1500]> transpose_29 = transpose(perm = transpose_29_perm_0, x = k_11_cast_fp16)[name = string("transpose_49")];
176
+ tensor<fp16, [1, 8, 1500, 64]> transpose_28 = transpose(perm = transpose_28_perm_0, x = q_11_cast_fp16)[name = string("transpose_50")];
177
+ tensor<fp16, [1, 8, 1500, 1500]> qk_5_cast_fp16 = matmul(transpose_x = qk_5_transpose_x_0, transpose_y = qk_5_transpose_y_0, x = transpose_28, y = transpose_29)[name = string("qk_5_cast_fp16")];
178
+ tensor<fp16, [1, 8, 1500, 1500]> var_348_cast_fp16 = softmax(axis = var_284, x = qk_5_cast_fp16)[name = string("op_348_cast_fp16")];
179
+ bool var_350_transpose_x_0 = const()[name = string("op_350_transpose_x_0"), val = bool(false)];
180
+ bool var_350_transpose_y_0 = const()[name = string("op_350_transpose_y_0"), val = bool(false)];
181
+ tensor<fp16, [1, 8, 1500, 64]> v_11_cast_fp16 = transpose(perm = var_344, x = var_343_cast_fp16)[name = string("transpose_51")];
182
+ tensor<fp16, [1, 8, 1500, 64]> var_350_cast_fp16 = matmul(transpose_x = var_350_transpose_x_0, transpose_y = var_350_transpose_y_0, x = var_348_cast_fp16, y = v_11_cast_fp16)[name = string("op_350_cast_fp16")];
183
+ tensor<int32, [4]> var_351 = const()[name = string("op_351"), val = tensor<int32, [4]>([0, 2, 1, 3])];
184
+ tensor<int32, [3]> concat_2 = const()[name = string("concat_2"), val = tensor<int32, [3]>([1, 1500, 512])];
185
+ tensor<fp16, [1, 1500, 8, 64]> var_352_cast_fp16 = transpose(perm = var_351, x = var_350_cast_fp16)[name = string("transpose_48")];
186
+ tensor<fp16, [1, 1500, 512]> x_35_cast_fp16 = reshape(shape = concat_2, x = var_352_cast_fp16)[name = string("x_35_cast_fp16")];
187
+ tensor<fp16, [512, 512]> var_356_to_fp16 = const()[name = string("op_356_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(17544960)))];
188
+ tensor<fp16, [512]> var_357_to_fp16 = const()[name = string("op_357_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(18069312)))];
189
+ tensor<fp16, [1, 1500, 512]> linear_15_cast_fp16 = linear(bias = var_357_to_fp16, weight = var_356_to_fp16, x = x_35_cast_fp16)[name = string("linear_15_cast_fp16")];
190
+ tensor<fp16, [1, 1500, 512]> x_37_cast_fp16 = add(x = x_31_cast_fp16, y = linear_15_cast_fp16)[name = string("x_37_cast_fp16")];
191
+ tensor<int32, [1]> var_364_axes_0 = const()[name = string("op_364_axes_0"), val = tensor<int32, [1]>([-1])];
192
+ tensor<fp16, [512]> blocks_2_mlp_ln_weight_to_fp16 = const()[name = string("blocks_2_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(18070400)))];
193
+ tensor<fp16, [512]> blocks_2_mlp_ln_bias_to_fp16 = const()[name = string("blocks_2_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(18071488)))];
194
+ tensor<fp16, [1, 1500, 512]> var_364_cast_fp16 = layer_norm(axes = var_364_axes_0, beta = blocks_2_mlp_ln_bias_to_fp16, epsilon = var_290_to_fp16, gamma = blocks_2_mlp_ln_weight_to_fp16, x = x_37_cast_fp16)[name = string("op_364_cast_fp16")];
195
+ tensor<fp16, [2048, 512]> var_373_to_fp16 = const()[name = string("op_373_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(18072576)))];
196
+ tensor<fp16, [2048]> var_374_to_fp16 = const()[name = string("op_374_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(20169792)))];
197
+ tensor<fp16, [1, 1500, 2048]> linear_16_cast_fp16 = linear(bias = var_374_to_fp16, weight = var_373_to_fp16, x = var_364_cast_fp16)[name = string("linear_16_cast_fp16")];
198
+ string x_41_mode_0 = const()[name = string("x_41_mode_0"), val = string("EXACT")];
199
+ tensor<fp16, [1, 1500, 2048]> x_41_cast_fp16 = gelu(mode = x_41_mode_0, x = linear_16_cast_fp16)[name = string("x_41_cast_fp16")];
200
+ tensor<fp16, [512, 2048]> var_379_to_fp16 = const()[name = string("op_379_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(20173952)))];
201
+ tensor<fp16, [512]> var_380_to_fp16 = const()[name = string("op_380_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22271168)))];
202
+ tensor<fp16, [1, 1500, 512]> linear_17_cast_fp16 = linear(bias = var_380_to_fp16, weight = var_379_to_fp16, x = x_41_cast_fp16)[name = string("linear_17_cast_fp16")];
203
+ tensor<fp16, [1, 1500, 512]> x_43_cast_fp16 = add(x = x_37_cast_fp16, y = linear_17_cast_fp16)[name = string("x_43_cast_fp16")];
204
+ int32 var_390 = const()[name = string("op_390"), val = int32(-1)];
205
+ tensor<int32, [1]> var_406_axes_0 = const()[name = string("op_406_axes_0"), val = tensor<int32, [1]>([-1])];
206
+ tensor<fp16, [512]> blocks_3_attn_ln_weight_to_fp16 = const()[name = string("blocks_3_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22272256)))];
207
+ tensor<fp16, [512]> blocks_3_attn_ln_bias_to_fp16 = const()[name = string("blocks_3_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22273344)))];
208
+ fp16 var_396_to_fp16 = const()[name = string("op_396_to_fp16"), val = fp16(0x1.5p-17)];
209
+ tensor<fp16, [1, 1500, 512]> var_406_cast_fp16 = layer_norm(axes = var_406_axes_0, beta = blocks_3_attn_ln_bias_to_fp16, epsilon = var_396_to_fp16, gamma = blocks_3_attn_ln_weight_to_fp16, x = x_43_cast_fp16)[name = string("op_406_cast_fp16")];
210
+ tensor<fp16, [512, 512]> var_417_to_fp16 = const()[name = string("op_417_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22274432)))];
211
+ tensor<fp16, [512]> var_418_to_fp16 = const()[name = string("op_418_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22798784)))];
212
+ tensor<fp16, [1, 1500, 512]> linear_18_cast_fp16 = linear(bias = var_418_to_fp16, weight = var_417_to_fp16, x = var_406_cast_fp16)[name = string("linear_18_cast_fp16")];
213
+ tensor<fp16, [512, 512]> var_421_to_fp16 = const()[name = string("op_421_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22799872)))];
214
+ tensor<fp16, [1, 1500, 512]> linear_19_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_421_to_fp16, x = var_406_cast_fp16)[name = string("linear_19_cast_fp16")];
215
+ tensor<fp16, [512, 512]> var_425_to_fp16 = const()[name = string("op_425_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(23324224)))];
216
+ tensor<fp16, [512]> var_426_to_fp16 = const()[name = string("op_426_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(23848576)))];
217
+ tensor<fp16, [1, 1500, 512]> linear_20_cast_fp16 = linear(bias = var_426_to_fp16, weight = var_425_to_fp16, x = var_406_cast_fp16)[name = string("linear_20_cast_fp16")];
218
+ tensor<int32, [4]> var_434 = const()[name = string("op_434"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
219
+ tensor<fp16, [1, 1500, 8, 64]> var_435_cast_fp16 = reshape(shape = var_434, x = linear_18_cast_fp16)[name = string("op_435_cast_fp16")];
220
+ tensor<fp16, [1, 1, 1, 1]> const_48_to_fp16 = const()[name = string("const_48_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
221
+ tensor<fp16, [1, 1500, 8, 64]> q_15_cast_fp16 = mul(x = var_435_cast_fp16, y = const_48_to_fp16)[name = string("q_15_cast_fp16")];
222
+ tensor<int32, [4]> var_441 = const()[name = string("op_441"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
223
+ tensor<fp16, [1, 1500, 8, 64]> var_442_cast_fp16 = reshape(shape = var_441, x = linear_19_cast_fp16)[name = string("op_442_cast_fp16")];
224
+ tensor<fp16, [1, 1, 1, 1]> const_49_to_fp16 = const()[name = string("const_49_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
225
+ tensor<fp16, [1, 1500, 8, 64]> k_15_cast_fp16 = mul(x = var_442_cast_fp16, y = const_49_to_fp16)[name = string("k_15_cast_fp16")];
226
+ tensor<int32, [4]> var_448 = const()[name = string("op_448"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
227
+ tensor<fp16, [1, 1500, 8, 64]> var_449_cast_fp16 = reshape(shape = var_448, x = linear_20_cast_fp16)[name = string("op_449_cast_fp16")];
228
+ tensor<int32, [4]> var_450 = const()[name = string("op_450"), val = tensor<int32, [4]>([0, 2, 1, 3])];
229
+ bool qk_7_transpose_x_0 = const()[name = string("qk_7_transpose_x_0"), val = bool(false)];
230
+ bool qk_7_transpose_y_0 = const()[name = string("qk_7_transpose_y_0"), val = bool(false)];
231
+ tensor<int32, [4]> transpose_30_perm_0 = const()[name = string("transpose_30_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
232
+ tensor<int32, [4]> transpose_31_perm_0 = const()[name = string("transpose_31_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
233
+ tensor<fp16, [1, 8, 64, 1500]> transpose_31 = transpose(perm = transpose_31_perm_0, x = k_15_cast_fp16)[name = string("transpose_45")];
234
+ tensor<fp16, [1, 8, 1500, 64]> transpose_30 = transpose(perm = transpose_30_perm_0, x = q_15_cast_fp16)[name = string("transpose_46")];
235
+ tensor<fp16, [1, 8, 1500, 1500]> qk_7_cast_fp16 = matmul(transpose_x = qk_7_transpose_x_0, transpose_y = qk_7_transpose_y_0, x = transpose_30, y = transpose_31)[name = string("qk_7_cast_fp16")];
236
+ tensor<fp16, [1, 8, 1500, 1500]> var_454_cast_fp16 = softmax(axis = var_390, x = qk_7_cast_fp16)[name = string("op_454_cast_fp16")];
237
+ bool var_456_transpose_x_0 = const()[name = string("op_456_transpose_x_0"), val = bool(false)];
238
+ bool var_456_transpose_y_0 = const()[name = string("op_456_transpose_y_0"), val = bool(false)];
239
+ tensor<fp16, [1, 8, 1500, 64]> v_15_cast_fp16 = transpose(perm = var_450, x = var_449_cast_fp16)[name = string("transpose_47")];
240
+ tensor<fp16, [1, 8, 1500, 64]> var_456_cast_fp16 = matmul(transpose_x = var_456_transpose_x_0, transpose_y = var_456_transpose_y_0, x = var_454_cast_fp16, y = v_15_cast_fp16)[name = string("op_456_cast_fp16")];
241
+ tensor<int32, [4]> var_457 = const()[name = string("op_457"), val = tensor<int32, [4]>([0, 2, 1, 3])];
242
+ tensor<int32, [3]> concat_3 = const()[name = string("concat_3"), val = tensor<int32, [3]>([1, 1500, 512])];
243
+ tensor<fp16, [1, 1500, 8, 64]> var_458_cast_fp16 = transpose(perm = var_457, x = var_456_cast_fp16)[name = string("transpose_44")];
244
+ tensor<fp16, [1, 1500, 512]> x_47_cast_fp16 = reshape(shape = concat_3, x = var_458_cast_fp16)[name = string("x_47_cast_fp16")];
245
+ tensor<fp16, [512, 512]> var_462_to_fp16 = const()[name = string("op_462_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(23849664)))];
246
+ tensor<fp16, [512]> var_463_to_fp16 = const()[name = string("op_463_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(24374016)))];
247
+ tensor<fp16, [1, 1500, 512]> linear_21_cast_fp16 = linear(bias = var_463_to_fp16, weight = var_462_to_fp16, x = x_47_cast_fp16)[name = string("linear_21_cast_fp16")];
248
+ tensor<fp16, [1, 1500, 512]> x_49_cast_fp16 = add(x = x_43_cast_fp16, y = linear_21_cast_fp16)[name = string("x_49_cast_fp16")];
249
+ tensor<int32, [1]> var_470_axes_0 = const()[name = string("op_470_axes_0"), val = tensor<int32, [1]>([-1])];
250
+ tensor<fp16, [512]> blocks_3_mlp_ln_weight_to_fp16 = const()[name = string("blocks_3_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(24375104)))];
251
+ tensor<fp16, [512]> blocks_3_mlp_ln_bias_to_fp16 = const()[name = string("blocks_3_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(24376192)))];
252
+ tensor<fp16, [1, 1500, 512]> var_470_cast_fp16 = layer_norm(axes = var_470_axes_0, beta = blocks_3_mlp_ln_bias_to_fp16, epsilon = var_396_to_fp16, gamma = blocks_3_mlp_ln_weight_to_fp16, x = x_49_cast_fp16)[name = string("op_470_cast_fp16")];
253
+ tensor<fp16, [2048, 512]> var_479_to_fp16 = const()[name = string("op_479_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(24377280)))];
254
+ tensor<fp16, [2048]> var_480_to_fp16 = const()[name = string("op_480_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(26474496)))];
255
+ tensor<fp16, [1, 1500, 2048]> linear_22_cast_fp16 = linear(bias = var_480_to_fp16, weight = var_479_to_fp16, x = var_470_cast_fp16)[name = string("linear_22_cast_fp16")];
256
+ string x_53_mode_0 = const()[name = string("x_53_mode_0"), val = string("EXACT")];
257
+ tensor<fp16, [1, 1500, 2048]> x_53_cast_fp16 = gelu(mode = x_53_mode_0, x = linear_22_cast_fp16)[name = string("x_53_cast_fp16")];
258
+ tensor<fp16, [512, 2048]> var_485_to_fp16 = const()[name = string("op_485_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(26478656)))];
259
+ tensor<fp16, [512]> var_486_to_fp16 = const()[name = string("op_486_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(28575872)))];
260
+ tensor<fp16, [1, 1500, 512]> linear_23_cast_fp16 = linear(bias = var_486_to_fp16, weight = var_485_to_fp16, x = x_53_cast_fp16)[name = string("linear_23_cast_fp16")];
261
+ tensor<fp16, [1, 1500, 512]> x_55_cast_fp16 = add(x = x_49_cast_fp16, y = linear_23_cast_fp16)[name = string("x_55_cast_fp16")];
262
+ int32 var_496 = const()[name = string("op_496"), val = int32(-1)];
263
+ tensor<int32, [1]> var_512_axes_0 = const()[name = string("op_512_axes_0"), val = tensor<int32, [1]>([-1])];
264
+ tensor<fp16, [512]> blocks_4_attn_ln_weight_to_fp16 = const()[name = string("blocks_4_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(28576960)))];
265
+ tensor<fp16, [512]> blocks_4_attn_ln_bias_to_fp16 = const()[name = string("blocks_4_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(28578048)))];
266
+ fp16 var_502_to_fp16 = const()[name = string("op_502_to_fp16"), val = fp16(0x1.5p-17)];
267
+ tensor<fp16, [1, 1500, 512]> var_512_cast_fp16 = layer_norm(axes = var_512_axes_0, beta = blocks_4_attn_ln_bias_to_fp16, epsilon = var_502_to_fp16, gamma = blocks_4_attn_ln_weight_to_fp16, x = x_55_cast_fp16)[name = string("op_512_cast_fp16")];
268
+ tensor<fp16, [512, 512]> var_523_to_fp16 = const()[name = string("op_523_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(28579136)))];
269
+ tensor<fp16, [512]> var_524_to_fp16 = const()[name = string("op_524_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(29103488)))];
270
+ tensor<fp16, [1, 1500, 512]> linear_24_cast_fp16 = linear(bias = var_524_to_fp16, weight = var_523_to_fp16, x = var_512_cast_fp16)[name = string("linear_24_cast_fp16")];
271
+ tensor<fp16, [512, 512]> var_527_to_fp16 = const()[name = string("op_527_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(29104576)))];
272
+ tensor<fp16, [1, 1500, 512]> linear_25_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_527_to_fp16, x = var_512_cast_fp16)[name = string("linear_25_cast_fp16")];
273
+ tensor<fp16, [512, 512]> var_531_to_fp16 = const()[name = string("op_531_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(29628928)))];
274
+ tensor<fp16, [512]> var_532_to_fp16 = const()[name = string("op_532_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(30153280)))];
275
+ tensor<fp16, [1, 1500, 512]> linear_26_cast_fp16 = linear(bias = var_532_to_fp16, weight = var_531_to_fp16, x = var_512_cast_fp16)[name = string("linear_26_cast_fp16")];
276
+ tensor<int32, [4]> var_540 = const()[name = string("op_540"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
277
+ tensor<fp16, [1, 1500, 8, 64]> var_541_cast_fp16 = reshape(shape = var_540, x = linear_24_cast_fp16)[name = string("op_541_cast_fp16")];
278
+ tensor<fp16, [1, 1, 1, 1]> const_50_to_fp16 = const()[name = string("const_50_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
279
+ tensor<fp16, [1, 1500, 8, 64]> q_19_cast_fp16 = mul(x = var_541_cast_fp16, y = const_50_to_fp16)[name = string("q_19_cast_fp16")];
280
+ tensor<int32, [4]> var_547 = const()[name = string("op_547"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
281
+ tensor<fp16, [1, 1500, 8, 64]> var_548_cast_fp16 = reshape(shape = var_547, x = linear_25_cast_fp16)[name = string("op_548_cast_fp16")];
282
+ tensor<fp16, [1, 1, 1, 1]> const_51_to_fp16 = const()[name = string("const_51_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
283
+ tensor<fp16, [1, 1500, 8, 64]> k_19_cast_fp16 = mul(x = var_548_cast_fp16, y = const_51_to_fp16)[name = string("k_19_cast_fp16")];
284
+ tensor<int32, [4]> var_554 = const()[name = string("op_554"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
285
+ tensor<fp16, [1, 1500, 8, 64]> var_555_cast_fp16 = reshape(shape = var_554, x = linear_26_cast_fp16)[name = string("op_555_cast_fp16")];
286
+ tensor<int32, [4]> var_556 = const()[name = string("op_556"), val = tensor<int32, [4]>([0, 2, 1, 3])];
287
+ bool qk_9_transpose_x_0 = const()[name = string("qk_9_transpose_x_0"), val = bool(false)];
288
+ bool qk_9_transpose_y_0 = const()[name = string("qk_9_transpose_y_0"), val = bool(false)];
289
+ tensor<int32, [4]> transpose_32_perm_0 = const()[name = string("transpose_32_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
290
+ tensor<int32, [4]> transpose_33_perm_0 = const()[name = string("transpose_33_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
291
+ tensor<fp16, [1, 8, 64, 1500]> transpose_33 = transpose(perm = transpose_33_perm_0, x = k_19_cast_fp16)[name = string("transpose_41")];
292
+ tensor<fp16, [1, 8, 1500, 64]> transpose_32 = transpose(perm = transpose_32_perm_0, x = q_19_cast_fp16)[name = string("transpose_42")];
293
+ tensor<fp16, [1, 8, 1500, 1500]> qk_9_cast_fp16 = matmul(transpose_x = qk_9_transpose_x_0, transpose_y = qk_9_transpose_y_0, x = transpose_32, y = transpose_33)[name = string("qk_9_cast_fp16")];
294
+ tensor<fp16, [1, 8, 1500, 1500]> var_560_cast_fp16 = softmax(axis = var_496, x = qk_9_cast_fp16)[name = string("op_560_cast_fp16")];
295
+ bool var_562_transpose_x_0 = const()[name = string("op_562_transpose_x_0"), val = bool(false)];
296
+ bool var_562_transpose_y_0 = const()[name = string("op_562_transpose_y_0"), val = bool(false)];
297
+ tensor<fp16, [1, 8, 1500, 64]> v_19_cast_fp16 = transpose(perm = var_556, x = var_555_cast_fp16)[name = string("transpose_43")];
298
+ tensor<fp16, [1, 8, 1500, 64]> var_562_cast_fp16 = matmul(transpose_x = var_562_transpose_x_0, transpose_y = var_562_transpose_y_0, x = var_560_cast_fp16, y = v_19_cast_fp16)[name = string("op_562_cast_fp16")];
299
+ tensor<int32, [4]> var_563 = const()[name = string("op_563"), val = tensor<int32, [4]>([0, 2, 1, 3])];
300
+ tensor<int32, [3]> concat_4 = const()[name = string("concat_4"), val = tensor<int32, [3]>([1, 1500, 512])];
301
+ tensor<fp16, [1, 1500, 8, 64]> var_564_cast_fp16 = transpose(perm = var_563, x = var_562_cast_fp16)[name = string("transpose_40")];
302
+ tensor<fp16, [1, 1500, 512]> x_59_cast_fp16 = reshape(shape = concat_4, x = var_564_cast_fp16)[name = string("x_59_cast_fp16")];
303
+ tensor<fp16, [512, 512]> var_568_to_fp16 = const()[name = string("op_568_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(30154368)))];
304
+ tensor<fp16, [512]> var_569_to_fp16 = const()[name = string("op_569_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(30678720)))];
305
+ tensor<fp16, [1, 1500, 512]> linear_27_cast_fp16 = linear(bias = var_569_to_fp16, weight = var_568_to_fp16, x = x_59_cast_fp16)[name = string("linear_27_cast_fp16")];
306
+ tensor<fp16, [1, 1500, 512]> x_61_cast_fp16 = add(x = x_55_cast_fp16, y = linear_27_cast_fp16)[name = string("x_61_cast_fp16")];
307
+ tensor<int32, [1]> var_576_axes_0 = const()[name = string("op_576_axes_0"), val = tensor<int32, [1]>([-1])];
308
+ tensor<fp16, [512]> blocks_4_mlp_ln_weight_to_fp16 = const()[name = string("blocks_4_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(30679808)))];
309
+ tensor<fp16, [512]> blocks_4_mlp_ln_bias_to_fp16 = const()[name = string("blocks_4_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(30680896)))];
310
+ tensor<fp16, [1, 1500, 512]> var_576_cast_fp16 = layer_norm(axes = var_576_axes_0, beta = blocks_4_mlp_ln_bias_to_fp16, epsilon = var_502_to_fp16, gamma = blocks_4_mlp_ln_weight_to_fp16, x = x_61_cast_fp16)[name = string("op_576_cast_fp16")];
311
+ tensor<fp16, [2048, 512]> var_585_to_fp16 = const()[name = string("op_585_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(30681984)))];
312
+ tensor<fp16, [2048]> var_586_to_fp16 = const()[name = string("op_586_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(32779200)))];
313
+ tensor<fp16, [1, 1500, 2048]> linear_28_cast_fp16 = linear(bias = var_586_to_fp16, weight = var_585_to_fp16, x = var_576_cast_fp16)[name = string("linear_28_cast_fp16")];
314
+ string x_65_mode_0 = const()[name = string("x_65_mode_0"), val = string("EXACT")];
315
+ tensor<fp16, [1, 1500, 2048]> x_65_cast_fp16 = gelu(mode = x_65_mode_0, x = linear_28_cast_fp16)[name = string("x_65_cast_fp16")];
316
+ tensor<fp16, [512, 2048]> var_591_to_fp16 = const()[name = string("op_591_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(32783360)))];
317
+ tensor<fp16, [512]> var_592_to_fp16 = const()[name = string("op_592_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(34880576)))];
318
+ tensor<fp16, [1, 1500, 512]> linear_29_cast_fp16 = linear(bias = var_592_to_fp16, weight = var_591_to_fp16, x = x_65_cast_fp16)[name = string("linear_29_cast_fp16")];
319
+ tensor<fp16, [1, 1500, 512]> x_67_cast_fp16 = add(x = x_61_cast_fp16, y = linear_29_cast_fp16)[name = string("x_67_cast_fp16")];
320
+ int32 var_602 = const()[name = string("op_602"), val = int32(-1)];
321
+ tensor<int32, [1]> var_618_axes_0 = const()[name = string("op_618_axes_0"), val = tensor<int32, [1]>([-1])];
322
+ tensor<fp16, [512]> blocks_5_attn_ln_weight_to_fp16 = const()[name = string("blocks_5_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(34881664)))];
323
+ tensor<fp16, [512]> blocks_5_attn_ln_bias_to_fp16 = const()[name = string("blocks_5_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(34882752)))];
324
+ fp16 var_608_to_fp16 = const()[name = string("op_608_to_fp16"), val = fp16(0x1.5p-17)];
325
+ tensor<fp16, [1, 1500, 512]> var_618_cast_fp16 = layer_norm(axes = var_618_axes_0, beta = blocks_5_attn_ln_bias_to_fp16, epsilon = var_608_to_fp16, gamma = blocks_5_attn_ln_weight_to_fp16, x = x_67_cast_fp16)[name = string("op_618_cast_fp16")];
326
+ tensor<fp16, [512, 512]> var_629_to_fp16 = const()[name = string("op_629_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(34883840)))];
327
+ tensor<fp16, [512]> var_630_to_fp16 = const()[name = string("op_630_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(35408192)))];
328
+ tensor<fp16, [1, 1500, 512]> linear_30_cast_fp16 = linear(bias = var_630_to_fp16, weight = var_629_to_fp16, x = var_618_cast_fp16)[name = string("linear_30_cast_fp16")];
329
+ tensor<fp16, [512, 512]> var_633_to_fp16 = const()[name = string("op_633_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(35409280)))];
330
+ tensor<fp16, [1, 1500, 512]> linear_31_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_633_to_fp16, x = var_618_cast_fp16)[name = string("linear_31_cast_fp16")];
331
+ tensor<fp16, [512, 512]> var_637_to_fp16 = const()[name = string("op_637_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(35933632)))];
332
+ tensor<fp16, [512]> var_638_to_fp16 = const()[name = string("op_638_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(36457984)))];
333
+ tensor<fp16, [1, 1500, 512]> linear_32_cast_fp16 = linear(bias = var_638_to_fp16, weight = var_637_to_fp16, x = var_618_cast_fp16)[name = string("linear_32_cast_fp16")];
334
+ tensor<int32, [4]> var_646 = const()[name = string("op_646"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
335
+ tensor<fp16, [1, 1500, 8, 64]> var_647_cast_fp16 = reshape(shape = var_646, x = linear_30_cast_fp16)[name = string("op_647_cast_fp16")];
336
+ tensor<fp16, [1, 1, 1, 1]> const_52_to_fp16 = const()[name = string("const_52_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
337
+ tensor<fp16, [1, 1500, 8, 64]> q_cast_fp16 = mul(x = var_647_cast_fp16, y = const_52_to_fp16)[name = string("q_cast_fp16")];
338
+ tensor<int32, [4]> var_653 = const()[name = string("op_653"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
339
+ tensor<fp16, [1, 1500, 8, 64]> var_654_cast_fp16 = reshape(shape = var_653, x = linear_31_cast_fp16)[name = string("op_654_cast_fp16")];
340
+ tensor<fp16, [1, 1, 1, 1]> const_53_to_fp16 = const()[name = string("const_53_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
341
+ tensor<fp16, [1, 1500, 8, 64]> k_cast_fp16 = mul(x = var_654_cast_fp16, y = const_53_to_fp16)[name = string("k_cast_fp16")];
342
+ tensor<int32, [4]> var_660 = const()[name = string("op_660"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
343
+ tensor<fp16, [1, 1500, 8, 64]> var_661_cast_fp16 = reshape(shape = var_660, x = linear_32_cast_fp16)[name = string("op_661_cast_fp16")];
344
+ tensor<int32, [4]> var_662 = const()[name = string("op_662"), val = tensor<int32, [4]>([0, 2, 1, 3])];
345
+ bool qk_transpose_x_0 = const()[name = string("qk_transpose_x_0"), val = bool(false)];
346
+ bool qk_transpose_y_0 = const()[name = string("qk_transpose_y_0"), val = bool(false)];
347
+ tensor<int32, [4]> transpose_34_perm_0 = const()[name = string("transpose_34_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
348
+ tensor<int32, [4]> transpose_35_perm_0 = const()[name = string("transpose_35_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
349
+ tensor<fp16, [1, 8, 64, 1500]> transpose_35 = transpose(perm = transpose_35_perm_0, x = k_cast_fp16)[name = string("transpose_37")];
350
+ tensor<fp16, [1, 8, 1500, 64]> transpose_34 = transpose(perm = transpose_34_perm_0, x = q_cast_fp16)[name = string("transpose_38")];
351
+ tensor<fp16, [1, 8, 1500, 1500]> qk_cast_fp16 = matmul(transpose_x = qk_transpose_x_0, transpose_y = qk_transpose_y_0, x = transpose_34, y = transpose_35)[name = string("qk_cast_fp16")];
352
+ tensor<fp16, [1, 8, 1500, 1500]> var_666_cast_fp16 = softmax(axis = var_602, x = qk_cast_fp16)[name = string("op_666_cast_fp16")];
353
+ bool var_668_transpose_x_0 = const()[name = string("op_668_transpose_x_0"), val = bool(false)];
354
+ bool var_668_transpose_y_0 = const()[name = string("op_668_transpose_y_0"), val = bool(false)];
355
+ tensor<fp16, [1, 8, 1500, 64]> v_cast_fp16 = transpose(perm = var_662, x = var_661_cast_fp16)[name = string("transpose_39")];
356
+ tensor<fp16, [1, 8, 1500, 64]> var_668_cast_fp16 = matmul(transpose_x = var_668_transpose_x_0, transpose_y = var_668_transpose_y_0, x = var_666_cast_fp16, y = v_cast_fp16)[name = string("op_668_cast_fp16")];
357
+ tensor<int32, [4]> var_669 = const()[name = string("op_669"), val = tensor<int32, [4]>([0, 2, 1, 3])];
358
+ tensor<int32, [3]> concat_5 = const()[name = string("concat_5"), val = tensor<int32, [3]>([1, 1500, 512])];
359
+ tensor<fp16, [1, 1500, 8, 64]> var_670_cast_fp16 = transpose(perm = var_669, x = var_668_cast_fp16)[name = string("transpose_36")];
360
+ tensor<fp16, [1, 1500, 512]> x_71_cast_fp16 = reshape(shape = concat_5, x = var_670_cast_fp16)[name = string("x_71_cast_fp16")];
361
+ tensor<fp16, [512, 512]> var_674_to_fp16 = const()[name = string("op_674_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(36459072)))];
362
+ tensor<fp16, [512]> var_675_to_fp16 = const()[name = string("op_675_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(36983424)))];
363
+ tensor<fp16, [1, 1500, 512]> linear_33_cast_fp16 = linear(bias = var_675_to_fp16, weight = var_674_to_fp16, x = x_71_cast_fp16)[name = string("linear_33_cast_fp16")];
364
+ tensor<fp16, [1, 1500, 512]> x_73_cast_fp16 = add(x = x_67_cast_fp16, y = linear_33_cast_fp16)[name = string("x_73_cast_fp16")];
365
+ tensor<int32, [1]> var_682_axes_0 = const()[name = string("op_682_axes_0"), val = tensor<int32, [1]>([-1])];
366
+ tensor<fp16, [512]> blocks_5_mlp_ln_weight_to_fp16 = const()[name = string("blocks_5_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(36984512)))];
367
+ tensor<fp16, [512]> blocks_5_mlp_ln_bias_to_fp16 = const()[name = string("blocks_5_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(36985600)))];
368
+ tensor<fp16, [1, 1500, 512]> var_682_cast_fp16 = layer_norm(axes = var_682_axes_0, beta = blocks_5_mlp_ln_bias_to_fp16, epsilon = var_608_to_fp16, gamma = blocks_5_mlp_ln_weight_to_fp16, x = x_73_cast_fp16)[name = string("op_682_cast_fp16")];
369
+ tensor<fp16, [2048, 512]> var_691_to_fp16 = const()[name = string("op_691_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(36986688)))];
370
+ tensor<fp16, [2048]> var_692_to_fp16 = const()[name = string("op_692_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(39083904)))];
371
+ tensor<fp16, [1, 1500, 2048]> linear_34_cast_fp16 = linear(bias = var_692_to_fp16, weight = var_691_to_fp16, x = var_682_cast_fp16)[name = string("linear_34_cast_fp16")];
372
+ string x_77_mode_0 = const()[name = string("x_77_mode_0"), val = string("EXACT")];
373
+ tensor<fp16, [1, 1500, 2048]> x_77_cast_fp16 = gelu(mode = x_77_mode_0, x = linear_34_cast_fp16)[name = string("x_77_cast_fp16")];
374
+ tensor<fp16, [512, 2048]> var_697_to_fp16 = const()[name = string("op_697_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(39088064)))];
375
+ tensor<fp16, [512]> var_698_to_fp16 = const()[name = string("op_698_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41185280)))];
376
+ tensor<fp16, [1, 1500, 512]> linear_35_cast_fp16 = linear(bias = var_698_to_fp16, weight = var_697_to_fp16, x = x_77_cast_fp16)[name = string("linear_35_cast_fp16")];
377
+ tensor<fp16, [1, 1500, 512]> x_cast_fp16 = add(x = x_73_cast_fp16, y = linear_35_cast_fp16)[name = string("x_cast_fp16")];
378
+ tensor<int32, [1]> var_711_axes_0 = const()[name = string("op_711_axes_0"), val = tensor<int32, [1]>([-1])];
379
+ tensor<fp16, [512]> ln_post_weight_to_fp16 = const()[name = string("ln_post_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41186368)))];
380
+ tensor<fp16, [512]> ln_post_bias_to_fp16 = const()[name = string("ln_post_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41187456)))];
381
+ fp16 var_702_to_fp16 = const()[name = string("op_702_to_fp16"), val = fp16(0x1.5p-17)];
382
+ tensor<fp16, [1, 1500, 512]> output = layer_norm(axes = var_711_axes_0, beta = ln_post_bias_to_fp16, epsilon = var_702_to_fp16, gamma = ln_post_weight_to_fp16, x = x_cast_fp16)[name = string("op_711_cast_fp16")];
383
+ } -> (output);
384
+ }
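
Note (editorial): the MIL block above is the standard Whisper encoder self-attention as exported to Core ML: q and k are each scaled by 64^-0.25 (the 0x1.6ap-2 constants), matrix-multiplied, softmaxed, and applied to v before the output projection and residual add. A rough NumPy sketch of the same computation, with illustrative names only (the real weights live in weights/weight.bin at the offsets shown):

import numpy as np

def encoder_self_attention(x, w_q, b_q, w_k, w_v, b_v, w_out, b_out, n_head=8):
    """Rough NumPy equivalent of one MIL attention block (illustrative only)."""
    B, T, C = x.shape                        # (1, 1500, 512) for the base encoder
    d_head = C // n_head                     # 64
    scale = d_head ** -0.25                  # ~0.3536, the 0x1.6ap-2 constant above
    q = (x @ w_q.T + b_q) * scale
    k = (x @ w_k.T) * scale                  # key projection uses a shared zero bias in the MIL
    v = x @ w_v.T + b_v
    # reshape to (B, n_head, T, d_head), mirroring the reshape/transpose ops
    q, k, v = (t.reshape(B, T, n_head, d_head).transpose(0, 2, 1, 3) for t in (q, k, v))
    qk = q @ k.transpose(0, 1, 3, 2)         # (B, n_head, T, T)
    w = np.exp(qk - qk.max(axis=-1, keepdims=True))
    w /= w.sum(axis=-1, keepdims=True)       # softmax along the last axis
    out = (w @ v).transpose(0, 2, 1, 3).reshape(B, T, C)
    return out @ w_out.T + b_out             # output projection before the residual add
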
base/encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c45bee989219532c4cec616d439c51f280ac9d7b04f7847c4b7d7daba1d47523
3
+ size 41188544
base/model_dims.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "n_mels": 80,
3
+ "n_audio_ctx": 1500,
4
+ "n_audio_state": 512,
5
+ "n_audio_head": 8,
6
+ "n_audio_layer": 6,
7
+ "n_vocab": 51865,
8
+ "n_text_ctx": 448,
9
+ "n_text_state": 512,
10
+ "n_text_head": 8,
11
+ "n_text_layer": 6
12
+ }
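
Note (editorial): model_dims.json mirrors Whisper's ModelDimensions for the base checkpoint, and the shapes in the MIL above follow from it. A minimal sketch of reading it, paths assumed relative to a local checkout:

import json

# Illustrative only: sanity-check the dims against the shapes in the MIL above.
with open("base/model_dims.json") as f:
    dims = json.load(f)

d_head = dims["n_audio_state"] // dims["n_audio_head"]      # 512 // 8 = 64
print(dims["n_audio_layer"], "encoder layers,", dims["n_audio_head"], "heads of", d_head, "dims")
# Encoder output shape: (1, n_audio_ctx, n_audio_state) = (1, 1500, 512)
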
compile_model.sh ADDED
@@ -0,0 +1,34 @@
1
+ #!/bin/bash
2
+
3
+ for d in work/*
4
+ do
5
+ echo $d
6
+ pushd $d >/dev/null
7
+
8
+ if [ -d encoder ]; then
9
+ xcrun coremlcompiler compile encoder/chunked_pipeline.mlpackage .
10
+ rm -rf encoder.mlmodelc
11
+ mv chunked_pipeline.mlmodelc encoder.mlmodelc
12
+ else
13
+ xcrun coremlcompiler compile encoder.mlpackage .
14
+ fi
15
+ xcrun coremlcompiler compile decoder_first.mlpackage .
16
+ xcrun coremlcompiler compile decoder_second.mlpackage .
17
+
18
+ popd >/dev/null
19
+ done
20
+
21
+ mkdir -p output
22
+ for d in work/*
23
+ do
24
+ out=${d/work/output}
25
+ mkdir -p $out
26
+ mv $d/*.mlmodelc $d/model_dims.json $out/
27
+ done
28
+
29
+ mkdir -p index
30
+ for d in output/*
31
+ do
32
+ model=${d##*/}
33
+ (cd $d && find * -type f) > index/$model
34
+ done
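
Note (editorial): compile_model.sh expects work/<model>/ directories containing the exported .mlpackage bundles, compiles them with xcrun coremlcompiler (using the chunked encoder pipeline when an encoder/ subdirectory exists), moves the resulting .mlmodelc bundles plus model_dims.json into output/<model>/, and writes a per-variant file manifest to index/<model>. A small, hypothetical pre-publish check that each output directory matches its manifest:

import pathlib

# Hypothetical check: every path listed in index/<model> should exist
# under output/<model> after the script above has run.
for index_file in sorted(pathlib.Path("index").iterdir()):
    out_dir = pathlib.Path("output") / index_file.name
    missing = [p for p in index_file.read_text().splitlines()
               if p and not (out_dir / p).is_file()]
    print(index_file.name, "ok" if not missing else f"missing {len(missing)} files")
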
index/base ADDED
@@ -0,0 +1,16 @@
1
+ decoder_first.mlmodelc/weights/weight.bin
2
+ decoder_first.mlmodelc/metadata.json
3
+ decoder_first.mlmodelc/model.mil
4
+ decoder_first.mlmodelc/coremldata.bin
5
+ decoder_first.mlmodelc/analytics/coremldata.bin
6
+ decoder_second.mlmodelc/weights/weight.bin
7
+ decoder_second.mlmodelc/metadata.json
8
+ decoder_second.mlmodelc/model.mil
9
+ decoder_second.mlmodelc/coremldata.bin
10
+ decoder_second.mlmodelc/analytics/coremldata.bin
11
+ encoder.mlmodelc/weights/weight.bin
12
+ encoder.mlmodelc/metadata.json
13
+ encoder.mlmodelc/model.mil
14
+ encoder.mlmodelc/coremldata.bin
15
+ encoder.mlmodelc/analytics/coremldata.bin
16
+ model_dims.json
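
Note (editorial): the index files are plain manifests, one relative path per line, listing everything a given model variant needs. A hypothetical consumer could use one to fetch a single variant; the repo id below is a placeholder, not taken from this commit:

from huggingface_hub import hf_hub_download

REPO_ID = "someuser/whisper-coreml"     # placeholder repo id, for illustration only
variant = "base"

index_path = hf_hub_download(repo_id=REPO_ID, filename=f"index/{variant}")
with open(index_path) as f:
    for rel_path in f.read().splitlines():
        if rel_path:
            hf_hub_download(repo_id=REPO_ID, filename=f"{variant}/{rel_path}")
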
index/large-v2 ADDED
@@ -0,0 +1,22 @@
1
+ decoder_first.mlmodelc/weights/weight.bin
2
+ decoder_first.mlmodelc/metadata.json
3
+ decoder_first.mlmodelc/model.mil
4
+ decoder_first.mlmodelc/coremldata.bin
5
+ decoder_first.mlmodelc/analytics/coremldata.bin
6
+ decoder_second.mlmodelc/weights/weight.bin
7
+ decoder_second.mlmodelc/metadata.json
8
+ decoder_second.mlmodelc/model.mil
9
+ decoder_second.mlmodelc/coremldata.bin
10
+ decoder_second.mlmodelc/analytics/coremldata.bin
11
+ encoder.mlmodelc/metadata.json
12
+ encoder.mlmodelc/model0/weights/0-weight.bin
13
+ encoder.mlmodelc/model0/model.mil
14
+ encoder.mlmodelc/model0/coremldata.bin
15
+ encoder.mlmodelc/model0/analytics/coremldata.bin
16
+ encoder.mlmodelc/model1/weights/1-weight.bin
17
+ encoder.mlmodelc/model1/model.mil
18
+ encoder.mlmodelc/model1/coremldata.bin
19
+ encoder.mlmodelc/model1/analytics/coremldata.bin
20
+ encoder.mlmodelc/coremldata.bin
21
+ encoder.mlmodelc/analytics/coremldata.bin
22
+ model_dims.json
index/large-v3 ADDED
@@ -0,0 +1,22 @@
1
+ decoder_first.mlmodelc/weights/weight.bin
2
+ decoder_first.mlmodelc/metadata.json
3
+ decoder_first.mlmodelc/model.mil
4
+ decoder_first.mlmodelc/coremldata.bin
5
+ decoder_first.mlmodelc/analytics/coremldata.bin
6
+ decoder_second.mlmodelc/weights/weight.bin
7
+ decoder_second.mlmodelc/metadata.json
8
+ decoder_second.mlmodelc/model.mil
9
+ decoder_second.mlmodelc/coremldata.bin
10
+ decoder_second.mlmodelc/analytics/coremldata.bin
11
+ encoder.mlmodelc/metadata.json
12
+ encoder.mlmodelc/model0/weights/0-weight.bin
13
+ encoder.mlmodelc/model0/model.mil
14
+ encoder.mlmodelc/model0/coremldata.bin
15
+ encoder.mlmodelc/model0/analytics/coremldata.bin
16
+ encoder.mlmodelc/model1/weights/1-weight.bin
17
+ encoder.mlmodelc/model1/model.mil
18
+ encoder.mlmodelc/model1/coremldata.bin
19
+ encoder.mlmodelc/model1/analytics/coremldata.bin
20
+ encoder.mlmodelc/coremldata.bin
21
+ encoder.mlmodelc/analytics/coremldata.bin
22
+ model_dims.json
index/medium ADDED
@@ -0,0 +1,16 @@
1
+ decoder_first.mlmodelc/weights/weight.bin
2
+ decoder_first.mlmodelc/metadata.json
3
+ decoder_first.mlmodelc/model.mil
4
+ decoder_first.mlmodelc/coremldata.bin
5
+ decoder_first.mlmodelc/analytics/coremldata.bin
6
+ decoder_second.mlmodelc/weights/weight.bin
7
+ decoder_second.mlmodelc/metadata.json
8
+ decoder_second.mlmodelc/model.mil
9
+ decoder_second.mlmodelc/coremldata.bin
10
+ decoder_second.mlmodelc/analytics/coremldata.bin
11
+ encoder.mlmodelc/weights/weight.bin
12
+ encoder.mlmodelc/metadata.json
13
+ encoder.mlmodelc/model.mil
14
+ encoder.mlmodelc/coremldata.bin
15
+ encoder.mlmodelc/analytics/coremldata.bin
16
+ model_dims.json
index/small ADDED
@@ -0,0 +1,16 @@
1
+ decoder_first.mlmodelc/weights/weight.bin
2
+ decoder_first.mlmodelc/metadata.json
3
+ decoder_first.mlmodelc/model.mil
4
+ decoder_first.mlmodelc/coremldata.bin
5
+ decoder_first.mlmodelc/analytics/coremldata.bin
6
+ decoder_second.mlmodelc/weights/weight.bin
7
+ decoder_second.mlmodelc/metadata.json
8
+ decoder_second.mlmodelc/model.mil
9
+ decoder_second.mlmodelc/coremldata.bin
10
+ decoder_second.mlmodelc/analytics/coremldata.bin
11
+ encoder.mlmodelc/weights/weight.bin
12
+ encoder.mlmodelc/metadata.json
13
+ encoder.mlmodelc/model.mil
14
+ encoder.mlmodelc/coremldata.bin
15
+ encoder.mlmodelc/analytics/coremldata.bin
16
+ model_dims.json
index/tiny ADDED
@@ -0,0 +1,16 @@
1
+ decoder_first.mlmodelc/weights/weight.bin
2
+ decoder_first.mlmodelc/metadata.json
3
+ decoder_first.mlmodelc/model.mil
4
+ decoder_first.mlmodelc/coremldata.bin
5
+ decoder_first.mlmodelc/analytics/coremldata.bin
6
+ decoder_second.mlmodelc/weights/weight.bin
7
+ decoder_second.mlmodelc/metadata.json
8
+ decoder_second.mlmodelc/model.mil
9
+ decoder_second.mlmodelc/coremldata.bin
10
+ decoder_second.mlmodelc/analytics/coremldata.bin
11
+ encoder.mlmodelc/weights/weight.bin
12
+ encoder.mlmodelc/metadata.json
13
+ encoder.mlmodelc/model.mil
14
+ encoder.mlmodelc/coremldata.bin
15
+ encoder.mlmodelc/analytics/coremldata.bin
16
+ model_dims.json
large-v2/decoder_first.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a564dfd67cfcb3c0ee8cd9f7ef9f303fbfc561e635709bd3a46c5870571079de
3
+ size 243
large-v2/decoder_first.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6214be9e110a102836fb1fdb960a2fb564e60f5d9e3d1e25a9b7f978309480e
3
+ size 453
large-v2/decoder_first.mlmodelc/metadata.json ADDED
@@ -0,0 +1,106 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "dummy",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 9,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios18.writeState" : 66,
23
+ "Shape" : 64,
24
+ "Ios18.linear" : 64,
25
+ "Identity" : 1,
26
+ "Ios18.gather" : 64,
27
+ "Ios18.concat" : 64,
28
+ "Ios18.sliceUpdate" : 66,
29
+ "Ios18.cast" : 128,
30
+ "Ios18.expandDims" : 64,
31
+ "Ios18.readState" : 66
32
+ },
33
+ "computePrecision" : "Mixed (Float16, Int16, Int32, UInt16)",
34
+ "isUpdatable" : "0",
35
+ "stateSchema" : [
36
+ {
37
+ "dataType" : "Float16",
38
+ "isOptional" : "0",
39
+ "formattedType" : "State (Float16 32 × 1 × 448 × 1280)",
40
+ "shortDescription" : "",
41
+ "shape" : "[32, 1, 448, 1280]",
42
+ "name" : "k_cache1",
43
+ "type" : "State"
44
+ },
45
+ {
46
+ "dataType" : "Float16",
47
+ "isOptional" : "0",
48
+ "formattedType" : "State (Float16 32 × 1 × 448 × 1280)",
49
+ "shortDescription" : "",
50
+ "shape" : "[32, 1, 448, 1280]",
51
+ "name" : "v_cache1",
52
+ "type" : "State"
53
+ },
54
+ {
55
+ "dataType" : "Float16",
56
+ "isOptional" : "0",
57
+ "formattedType" : "State (Float16 32 × 1 × 1500 × 1280)",
58
+ "shortDescription" : "",
59
+ "shape" : "[32, 1, 1500, 1280]",
60
+ "name" : "k_cache2",
61
+ "type" : "State"
62
+ },
63
+ {
64
+ "dataType" : "Float16",
65
+ "isOptional" : "0",
66
+ "formattedType" : "State (Float16 32 × 1 × 1500 × 1280)",
67
+ "shortDescription" : "",
68
+ "shape" : "[32, 1, 1500, 1280]",
69
+ "name" : "v_cache2",
70
+ "type" : "State"
71
+ }
72
+ ],
73
+ "availability" : {
74
+ "macOS" : "15.0",
75
+ "tvOS" : "18.0",
76
+ "visionOS" : "2.0",
77
+ "watchOS" : "11.0",
78
+ "iOS" : "18.0",
79
+ "macCatalyst" : "18.0"
80
+ },
81
+ "modelType" : {
82
+ "name" : "MLModelType_mlProgram"
83
+ },
84
+ "userDefinedMetadata" : {
85
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
86
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
87
+ "com.github.apple.coremltools.version" : "8.0"
88
+ },
89
+ "inputSchema" : [
90
+ {
91
+ "dataType" : "Float16",
92
+ "hasShapeFlexibility" : "1",
93
+ "isOptional" : "0",
94
+ "shapeFlexibility" : "1 × 1...1500 × 1280",
95
+ "shapeRange" : "[[1, 1], [1, 1500], [1280, 1280]]",
96
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1280)",
97
+ "type" : "MultiArray",
98
+ "shape" : "[1, 1, 1280]",
99
+ "name" : "audio_data",
100
+ "shortDescription" : ""
101
+ }
102
+ ],
103
+ "generatedClassName" : "decoder_first",
104
+ "method" : "predict"
105
+ }
106
+ ]
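
Note (editorial): the schema above describes a stateful model. decoder_first takes the encoder output (audio_data, up to 1 × 1500 × 1280 for large-v2) and fills the k_cache2/v_cache2 cross-attention states; "dummy" is a throwaway output. A sketch of calling it, assuming coremltools 8's stateful prediction API (make_state / predict(..., state=...)); paths and shapes follow the metadata:

import numpy as np
import coremltools as ct

# Assumption: coremltools 8 stateful prediction API on macOS 15 / iOS 18.
decoder_first = ct.models.CompiledMLModel("large-v2/decoder_first.mlmodelc")
state = decoder_first.make_state()          # holds k_cache1/v_cache1/k_cache2/v_cache2

audio_embed = np.zeros((1, 1500, 1280), dtype=np.float16)   # stand-in for encoder output
decoder_first.predict({"audio_data": audio_embed}, state=state)
# k_cache2 / v_cache2 now hold the cross-attention keys and values for decoding.
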
large-v2/decoder_first.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
large-v2/decoder_first.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ac46c34d51832dd11fbc34c772a9a35a5fb4cace68406b7044dd4ba652dca1c
3
+ size 246506176
large-v2/decoder_second.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1afd3cf2ab05eb2b7268afb62f418b5df01b6b5d60d746bdeec2b5ad8d760f65
3
+ size 243
large-v2/decoder_second.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5601244df54c60a16c26b761742867d06c6ef440ab8b0776ce5f6d1b4875c95
3
+ size 487
large-v2/decoder_second.mlmodelc/metadata.json ADDED
@@ -0,0 +1,127 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "logits",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 9,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios18.linear" : 257,
23
+ "Ios18.readState" : 66,
24
+ "Ios18.expandDims" : 33,
25
+ "Ios18.sub" : 1,
26
+ "Ios18.matmul" : 128,
27
+ "Ios18.gelu" : 32,
28
+ "Ios18.gather" : 35,
29
+ "Ios18.concat" : 162,
30
+ "Shape" : 34,
31
+ "Ios18.add" : 161,
32
+ "Ios18.sliceUpdate" : 128,
33
+ "Ios18.sliceByIndex" : 257,
34
+ "Ios18.layerNorm" : 97,
35
+ "Ios18.cast" : 68,
36
+ "Ios18.transpose" : 256,
37
+ "Ios18.writeState" : 64,
38
+ "Ios18.reshape" : 256,
39
+ "Ios18.softmax" : 64,
40
+ "Ios18.mul" : 128
41
+ },
42
+ "computePrecision" : "Mixed (Float16, Int16, Int32, UInt16)",
43
+ "isUpdatable" : "0",
44
+ "stateSchema" : [
45
+ {
46
+ "dataType" : "Float16",
47
+ "isOptional" : "0",
48
+ "formattedType" : "State (Float16 32 × 1 × 448 × 1280)",
49
+ "shortDescription" : "",
50
+ "shape" : "[32, 1, 448, 1280]",
51
+ "name" : "k_cache1",
52
+ "type" : "State"
53
+ },
54
+ {
55
+ "dataType" : "Float16",
56
+ "isOptional" : "0",
57
+ "formattedType" : "State (Float16 32 × 1 × 448 × 1280)",
58
+ "shortDescription" : "",
59
+ "shape" : "[32, 1, 448, 1280]",
60
+ "name" : "v_cache1",
61
+ "type" : "State"
62
+ },
63
+ {
64
+ "dataType" : "Float16",
65
+ "isOptional" : "0",
66
+ "formattedType" : "State (Float16 32 × 1 × 1500 × 1280)",
67
+ "shortDescription" : "",
68
+ "shape" : "[32, 1, 1500, 1280]",
69
+ "name" : "k_cache2",
70
+ "type" : "State"
71
+ },
72
+ {
73
+ "dataType" : "Float16",
74
+ "isOptional" : "0",
75
+ "formattedType" : "State (Float16 32 × 1 × 1500 × 1280)",
76
+ "shortDescription" : "",
77
+ "shape" : "[32, 1, 1500, 1280]",
78
+ "name" : "v_cache2",
79
+ "type" : "State"
80
+ }
81
+ ],
82
+ "availability" : {
83
+ "macOS" : "15.0",
84
+ "tvOS" : "18.0",
85
+ "visionOS" : "2.0",
86
+ "watchOS" : "11.0",
87
+ "iOS" : "18.0",
88
+ "macCatalyst" : "18.0"
89
+ },
90
+ "modelType" : {
91
+ "name" : "MLModelType_mlProgram"
92
+ },
93
+ "userDefinedMetadata" : {
94
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
95
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
96
+ "com.github.apple.coremltools.version" : "8.0"
97
+ },
98
+ "inputSchema" : [
99
+ {
100
+ "dataType" : "Int32",
101
+ "hasShapeFlexibility" : "1",
102
+ "isOptional" : "0",
103
+ "shapeFlexibility" : "1 × 1...448",
104
+ "shapeRange" : "[[1, 1], [1, 448]]",
105
+ "formattedType" : "MultiArray (Int32 1 × 1)",
106
+ "type" : "MultiArray",
107
+ "shape" : "[1, 1]",
108
+ "name" : "token_data",
109
+ "shortDescription" : ""
110
+ },
111
+ {
112
+ "dataType" : "Float16",
113
+ "hasShapeFlexibility" : "1",
114
+ "isOptional" : "0",
115
+ "shapeFlexibility" : "1 × 1...448",
116
+ "shapeRange" : "[[1, 1], [1, 448]]",
117
+ "formattedType" : "MultiArray (Float16 1 × 1)",
118
+ "type" : "MultiArray",
119
+ "shape" : "[1, 1]",
120
+ "name" : "offset_mask",
121
+ "shortDescription" : ""
122
+ }
123
+ ],
124
+ "generatedClassName" : "decoder_second",
125
+ "method" : "predict"
126
+ }
127
+ ]
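
Note (editorial): decoder_second then consumes token ids plus an offset_mask and returns logits, reusing the same cache state. A greedy-decoding sketch under the same coremltools 8 assumption, continuing from the decoder_first sketch above; the start-token id, the logits indexing, and the offset_mask value are illustrative guesses, since their semantics are not documented in this metadata:

import numpy as np
import coremltools as ct

decoder_second = ct.models.CompiledMLModel("large-v2/decoder_second.mlmodelc")

tokens = [50258]                            # <|startoftranscript|> (assumed id)
for _ in range(32):
    out = decoder_second.predict(
        {"token_data": np.array([[tokens[-1]]], dtype=np.int32),
         "offset_mask": np.ones((1, 1), dtype=np.float16)},
        state=state,                        # the state filled by decoder_first above
    )
    logits = np.asarray(out["logits"])
    tokens.append(int(logits.reshape(-1, logits.shape[-1])[-1].argmax()))
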
large-v2/decoder_second.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
large-v2/decoder_second.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf6ddb1f8892bfddf9f96b11c6b596934c1fe6c01839f81c06d8d2e094f19533
3
+ size 1607634802
large-v2/encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abf4666de503da73cee5379b4dfa17d4a3f06bff3c0c8e310d0a0e1cf2554f87
3
+ size 202
large-v2/encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e2ae6d3a42a2ca10398635e0b210846dcbc24a31184c93f9302694163bcadaf
3
+ size 196
large-v2/encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,76 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 1500 × 1280)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1500, 1280]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 9,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios18.add" : 65,
23
+ "Ios18.reshape" : 128,
24
+ "Ios18.linear" : 192,
25
+ "Ios18.gelu" : 34,
26
+ "Ios18.matmul" : 64,
27
+ "Ios18.transpose" : 129,
28
+ "Ios18.layerNorm" : 65,
29
+ "Ios18.conv" : 2,
30
+ "Ios18.cast" : 4,
31
+ "Ios18.softmax" : 32,
32
+ "Ios18.mul" : 64
33
+ },
34
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
35
+ "isUpdatable" : "0",
36
+ "stateSchema" : [
37
+
38
+ ],
39
+ "availability" : {
40
+ "macOS" : "15.0",
41
+ "tvOS" : "18.0",
42
+ "visionOS" : "2.0",
43
+ "watchOS" : "11.0",
44
+ "iOS" : "18.0",
45
+ "macCatalyst" : "18.0"
46
+ },
47
+ "modelType" : {
48
+ "name" : "MLModelType_pipeline",
49
+ "structure" : [
50
+ {
51
+ "name" : "MLModelType_mlProgram"
52
+ },
53
+ {
54
+ "name" : "MLModelType_mlProgram"
55
+ }
56
+ ]
57
+ },
58
+ "userDefinedMetadata" : {
59
+
60
+ },
61
+ "inputSchema" : [
62
+ {
63
+ "hasShapeFlexibility" : "0",
64
+ "isOptional" : "0",
65
+ "dataType" : "Float16",
66
+ "formattedType" : "MultiArray (Float16 1 × 80 × 3000)",
67
+ "shortDescription" : "",
68
+ "shape" : "[1, 80, 3000]",
69
+ "name" : "logmel_data",
70
+ "type" : "MultiArray"
71
+ }
72
+ ],
73
+ "generatedClassName" : "chunked_pipeline",
74
+ "method" : "predict"
75
+ }
76
+ ]
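
Note (editorial): the encoder is a two-stage pipeline (model0/model1) behind a single interface: logmel_data (1 × 80 × 3000) in, output (1 × 1500 × 1280) out. A minimal prediction sketch with a dummy input, assuming coremltools is available on macOS:

import numpy as np
import coremltools as ct

encoder = ct.models.CompiledMLModel("large-v2/encoder.mlmodelc",
                                    compute_units=ct.ComputeUnit.CPU_AND_NE)
logmel = np.zeros((1, 80, 3000), dtype=np.float16)          # 30 s of 80-bin log-mel frames
audio_embed = encoder.predict({"logmel_data": logmel})["output"]   # (1, 1500, 1280)
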
large-v2/encoder.mlmodelc/model0/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a8281049b2a65a3be541cfd9f949e84b8fe1c5251ce90e46da1626fed54e58a
3
+ size 108
large-v2/encoder.mlmodelc/model0/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a30736ebb8641d231fc84aa2d3d05770adb9603bdca174d439416450827b75a
3
+ size 200
large-v2/encoder.mlmodelc/model0/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
large-v2/encoder.mlmodelc/model0/weights/0-weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0d6315a62c6344e1bf4ac88f7f7c8408cc886645c98e8989f249229fd9e9c70
3
+ size 643945408
large-v2/encoder.mlmodelc/model1/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a8281049b2a65a3be541cfd9f949e84b8fe1c5251ce90e46da1626fed54e58a
3
+ size 108
large-v2/encoder.mlmodelc/model1/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70195139816248a2b1fbef695f96decb60b35af6f364f84a7d2293a3d0a09e11
3
+ size 196
large-v2/encoder.mlmodelc/model1/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
large-v2/encoder.mlmodelc/model1/weights/1-weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b18deffd43b1f394f0f9d6434ef3e042c9e0424f8b590891a5cb0c21e4951163
3
+ size 629660416
large-v2/model_dims.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "n_mels": 80,
3
+ "n_audio_ctx": 1500,
4
+ "n_audio_state": 1280,
5
+ "n_audio_head": 20,
6
+ "n_audio_layer": 32,
7
+ "n_vocab": 51865,
8
+ "n_text_ctx": 448,
9
+ "n_text_state": 1280,
10
+ "n_text_head": 20,
11
+ "n_text_layer": 32
12
+ }
large-v3/decoder_first.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a564dfd67cfcb3c0ee8cd9f7ef9f303fbfc561e635709bd3a46c5870571079de
3
+ size 243
large-v3/decoder_first.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6214be9e110a102836fb1fdb960a2fb564e60f5d9e3d1e25a9b7f978309480e
3
+ size 453
large-v3/decoder_first.mlmodelc/metadata.json ADDED
@@ -0,0 +1,106 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "dummy",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 9,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios18.writeState" : 66,
23
+ "Shape" : 64,
24
+ "Ios18.linear" : 64,
25
+ "Identity" : 1,
26
+ "Ios18.gather" : 64,
27
+ "Ios18.concat" : 64,
28
+ "Ios18.sliceUpdate" : 66,
29
+ "Ios18.cast" : 128,
30
+ "Ios18.expandDims" : 64,
31
+ "Ios18.readState" : 66
32
+ },
33
+ "computePrecision" : "Mixed (Float16, Int16, Int32, UInt16)",
34
+ "isUpdatable" : "0",
35
+ "stateSchema" : [
36
+ {
37
+ "dataType" : "Float16",
38
+ "isOptional" : "0",
39
+ "formattedType" : "State (Float16 32 × 1 × 448 × 1280)",
40
+ "shortDescription" : "",
41
+ "shape" : "[32, 1, 448, 1280]",
42
+ "name" : "k_cache1",
43
+ "type" : "State"
44
+ },
45
+ {
46
+ "dataType" : "Float16",
47
+ "isOptional" : "0",
48
+ "formattedType" : "State (Float16 32 × 1 × 448 × 1280)",
49
+ "shortDescription" : "",
50
+ "shape" : "[32, 1, 448, 1280]",
51
+ "name" : "v_cache1",
52
+ "type" : "State"
53
+ },
54
+ {
55
+ "dataType" : "Float16",
56
+ "isOptional" : "0",
57
+ "formattedType" : "State (Float16 32 × 1 × 1500 × 1280)",
58
+ "shortDescription" : "",
59
+ "shape" : "[32, 1, 1500, 1280]",
60
+ "name" : "k_cache2",
61
+ "type" : "State"
62
+ },
63
+ {
64
+ "dataType" : "Float16",
65
+ "isOptional" : "0",
66
+ "formattedType" : "State (Float16 32 × 1 × 1500 × 1280)",
67
+ "shortDescription" : "",
68
+ "shape" : "[32, 1, 1500, 1280]",
69
+ "name" : "v_cache2",
70
+ "type" : "State"
71
+ }
72
+ ],
73
+ "availability" : {
74
+ "macOS" : "15.0",
75
+ "tvOS" : "18.0",
76
+ "visionOS" : "2.0",
77
+ "watchOS" : "11.0",
78
+ "iOS" : "18.0",
79
+ "macCatalyst" : "18.0"
80
+ },
81
+ "modelType" : {
82
+ "name" : "MLModelType_mlProgram"
83
+ },
84
+ "userDefinedMetadata" : {
85
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
86
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
87
+ "com.github.apple.coremltools.version" : "8.0"
88
+ },
89
+ "inputSchema" : [
90
+ {
91
+ "dataType" : "Float16",
92
+ "hasShapeFlexibility" : "1",
93
+ "isOptional" : "0",
94
+ "shapeFlexibility" : "1 × 1...1500 × 1280",
95
+ "shapeRange" : "[[1, 1], [1, 1500], [1280, 1280]]",
96
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1280)",
97
+ "type" : "MultiArray",
98
+ "shape" : "[1, 1, 1280]",
99
+ "name" : "audio_data",
100
+ "shortDescription" : ""
101
+ }
102
+ ],
103
+ "generatedClassName" : "decoder_first",
104
+ "method" : "predict"
105
+ }
106
+ ]
large-v3/decoder_first.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
large-v3/decoder_first.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:346ed969f2a1ddb144c4add194c7b2a9a7d7b4a2e536d1e4a2afbfe5a4f62818
3
+ size 246506176