dearyoungjo committed on
Commit b9f11cf · verified · 1 Parent(s): 0ffc4e7

whisperkittools-0999a613c56c462b063b6b25d96260e1fc6ee2de generated files: dearyoungjo_Whisper-Medicalv1CNB1

dearyoungjo_Whisper-Medicalv1CNB1/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b58d36a7f4a729570b46b424ed8d847baefa07580e6cb9d47773ae738f8b845a
+ size 243
dearyoungjo_Whisper-Medicalv1CNB1/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffa9eb76e8e9d9be75a4d527e5249e61d67fd43081c5aa110fd24efa6c8c5ea3
+ size 348
dearyoungjo_Whisper-Medicalv1CNB1/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,70 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1280, 1, 1500]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Concat" : 672,
+ "Ios16.add" : 65,
+ "Ios16.mul" : 2560,
+ "SliceByIndex" : 4480,
+ "Transpose" : 32,
+ "Ios16.batchNorm" : 65,
+ "Ios16.gelu" : 34,
+ "Ios16.einsum" : 5120,
+ "Ios16.softmax" : 2560,
+ "Ios16.layerNorm" : 65,
+ "Ios16.conv" : 194
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 128, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "AudioEncoder",
+ "method" : "predict"
+ }
+ ]
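
The metadata above fully specifies the AudioEncoder's interface: one Float16 input `melspectrogram_features` of shape [1, 128, 1, 3000] and one Float16 output `encoder_output_embeds` of shape [1, 1280, 1, 1500]. A minimal sketch of exercising the compiled model from Python follows; it assumes a local clone of this repo and coremltools ≥ 7 (which provides `CompiledMLModel` for loading `.mlmodelc` bundles on macOS). The path and the dummy input are placeholders, not part of this commit.

```python
# Minimal sketch (assumptions: coremltools >= 7 on macOS, repo cloned locally).
import numpy as np
import coremltools as ct

# Load the compiled AudioEncoder directly from its .mlmodelc directory.
encoder = ct.models.CompiledMLModel(
    "dearyoungjo_Whisper-Medicalv1CNB1/AudioEncoder.mlmodelc",
    compute_units=ct.ComputeUnit.ALL,
)

# Input schema: Float16 MultiArray [1, 128, 1, 3000] named "melspectrogram_features".
# float32 is fine here; Core ML casts to the declared Float16 precision.
mel = np.zeros((1, 128, 1, 3000), dtype=np.float32)

out = encoder.predict({"melspectrogram_features": mel})
print(out["encoder_output_embeds"].shape)  # expected: (1, 1280, 1, 1500)
```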
dearyoungjo_Whisper-Medicalv1CNB1/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
dearyoungjo_Whisper-Medicalv1CNB1/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4fdc2ccbd5fd8e86b5a7831203ce37408fa587f7bbc743519e11a376f94211e
+ size 1273974400
dearyoungjo_Whisper-Medicalv1CNB1/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5be419f8622083ac7046306400643539f0e7577c843448c36defc090d41e7ce
+ size 243
dearyoungjo_Whisper-Medicalv1CNB1/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bfc12cffc2e45e039c7a18f384f09adffb72c182fcd93f9413d405d1a6c1130
+ size 329
dearyoungjo_Whisper-Medicalv1CNB1/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,74 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 128, 1, 3000]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios16.reshape" : 2,
+ "Ios16.mul" : 2,
+ "SliceByIndex" : 1,
+ "Ios16.sub" : 1,
+ "Ios16.log" : 1,
+ "Ios16.square" : 2,
+ "Ios16.add" : 3,
+ "Squeeze" : 2,
+ "Ios16.matmul" : 1,
+ "Ios16.conv" : 2,
+ "Ios16.maximum" : 1,
+ "ExpandDims" : 4,
+ "Ios16.reduceMax" : 1,
+ "Identity" : 1,
+ "Pad" : 1
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 480000)",
+ "shortDescription" : "",
+ "shape" : "[480000]",
+ "name" : "audio",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "MelSpectrogram",
+ "method" : "predict"
+ }
+ ]
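
For orientation: the `audio` input of 480000 Float16 samples corresponds to Whisper's fixed 30-second window at 16 kHz (30 × 16000 = 480000), and the [1, 128, 1, 3000] output is a 128-bin log-mel spectrogram at a 10 ms hop (3000 frames per 30 s). Below is a small NumPy-only sketch of padding or trimming raw audio to that fixed length before calling the model; the helper name is illustrative, not part of this repo.

```python
import numpy as np

SAMPLE_RATE = 16_000         # Hz, Whisper's expected input rate
WINDOW_SAMPLES = 480_000     # 30 s * 16 kHz, matches the "audio" input shape above

def to_fixed_window(audio: np.ndarray) -> np.ndarray:
    """Zero-pad or truncate a mono waveform to exactly 480000 samples."""
    audio = np.asarray(audio, dtype=np.float32).reshape(-1)
    if audio.shape[0] < WINDOW_SAMPLES:
        audio = np.pad(audio, (0, WINDOW_SAMPLES - audio.shape[0]))
    return audio[:WINDOW_SAMPLES]

# Example: 12 s of silence becomes a full 30 s window.
print(to_fixed_window(np.zeros(12 * SAMPLE_RATE)).shape)  # (480000,)
```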
dearyoungjo_Whisper-Medicalv1CNB1/MelSpectrogram.mlmodelc/model.mil ADDED
@@ -0,0 +1,66 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
3
+ {
4
+ func main<ios16>(tensor<fp16, [480000]> audio) {
5
+ tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
6
+ tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = tensor<string, []>("input_1_cast_fp16")];
7
+ tensor<int32, [6]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
8
+ tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("reflect")];
9
+ tensor<fp16, []> const_1_to_fp16 = const()[name = tensor<string, []>("const_1_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
10
+ tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
11
+ tensor<int32, [1]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [1]>([480400])];
12
+ tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
13
+ tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
14
+ tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
15
+ tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
16
+ tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
17
+ tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
18
+ tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
19
+ tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
20
+ tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
21
+ tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
22
+ tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
23
+ tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
24
+ tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
25
+ tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
26
+ tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
27
+ tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
28
+ tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160960)))];
29
+ tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
30
+ tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
31
+ tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
32
+ tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
33
+ tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
34
+ tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
35
+ tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
36
+ tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
37
+ tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = tensor<string, []>("magnitudes_1_cast_fp16")];
38
+ tensor<int32, [2]> magnitudes_begin_0 = const()[name = tensor<string, []>("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
39
+ tensor<int32, [2]> magnitudes_end_0 = const()[name = tensor<string, []>("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
40
+ tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = tensor<string, []>("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
41
+ tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = tensor<string, []>("magnitudes_cast_fp16")];
42
+ tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
43
+ tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
44
+ tensor<fp16, [128, 201]> mel_filters_to_fp16 = const()[name = tensor<string, []>("mel_filters_to_fp16"), val = tensor<fp16, [128, 201]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(321856)))];
45
+ tensor<fp16, [128, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
46
+ tensor<fp16, []> var_41_to_fp16 = const()[name = tensor<string, []>("op_41_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
47
+ tensor<fp16, [128, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
48
+ tensor<fp16, []> log_0_epsilon_0_to_fp16 = const()[name = tensor<string, []>("log_0_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
49
+ tensor<fp16, [128, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0_to_fp16, x = mel_spec_cast_fp16)[name = tensor<string, []>("log_0_cast_fp16")];
50
+ tensor<fp16, []> mul_0_y_0_to_fp16 = const()[name = tensor<string, []>("mul_0_y_0_to_fp16"), val = tensor<fp16, []>(0x1.bccp-2)];
51
+ tensor<fp16, [128, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
52
+ tensor<bool, []> var_44_keep_dims_0 = const()[name = tensor<string, []>("op_44_keep_dims_0"), val = tensor<bool, []>(false)];
53
+ tensor<fp16, []> var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = tensor<string, []>("op_44_cast_fp16")];
54
+ tensor<fp16, []> var_46_to_fp16 = const()[name = tensor<string, []>("op_46_to_fp16"), val = tensor<fp16, []>(0x1p+3)];
55
+ tensor<fp16, []> var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = tensor<string, []>("op_47_cast_fp16")];
56
+ tensor<fp16, [128, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = tensor<string, []>("log_spec_3_cast_fp16")];
57
+ tensor<fp16, []> var_50_to_fp16 = const()[name = tensor<string, []>("op_50_to_fp16"), val = tensor<fp16, []>(0x1p+2)];
58
+ tensor<fp16, [128, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = tensor<string, []>("op_51_cast_fp16")];
59
+ tensor<fp16, []> _inversed_log_spec_y_0_to_fp16 = const()[name = tensor<string, []>("_inversed_log_spec_y_0_to_fp16"), val = tensor<fp16, []>(0x1p-2)];
60
+ tensor<fp16, [128, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = tensor<string, []>("_inversed_log_spec_cast_fp16")];
61
+ tensor<int32, [1]> var_55_axes_0 = const()[name = tensor<string, []>("op_55_axes_0"), val = tensor<int32, [1]>([0])];
62
+ tensor<fp16, [1, 128, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
63
+ tensor<int32, [1]> var_62_axes_0 = const()[name = tensor<string, []>("op_62_axes_0"), val = tensor<int32, [1]>([2])];
64
+ tensor<fp16, [1, 128, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
65
+ } -> (melspectrogram_features);
66
+ }
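
The MIL program above is the standard Whisper log-mel front end written as Core ML ops: two stride-160 convolutions against fixed [201, 1, 400] bases produce the real and imaginary STFT components, their squared sum gives power magnitudes, a [128, 201] mel filterbank is applied by matmul, and the result is log-scaled (the fp16 constant 0x1.bccp-2 ≈ 0.4343 ≈ log10 e, so log · 0.4343 ≈ log10), clamped to within 8 of its global maximum, and rescaled as (x + 4) / 4. A NumPy sketch of just that post-STFT tail, assuming `magnitudes` ([201, 3000]) and `mel_filters` ([128, 201]) are already computed:

```python
import numpy as np

def log_mel_from_magnitudes(magnitudes: np.ndarray, mel_filters: np.ndarray) -> np.ndarray:
    """Mirror the tail of MelSpectrogram.mil: mel projection, log10, clamp, rescale."""
    mel_spec = mel_filters @ magnitudes          # [128, 3000]
    mel_spec = mel_spec + 2.0 ** -24             # epsilon from the MIL (0x1p-24)
    log_spec = np.log(mel_spec) * 0.4343         # natural log * log10(e), i.e. ~log10
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
    return (log_spec + 4.0) / 4.0                # final add(4) / mul(0.25) in the MIL

# Shape check with dummy data: [128, 3000], matching melspectrogram_features.
print(log_mel_from_magnitudes(np.ones((201, 3000)), np.ones((128, 201)) / 201).shape)
```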
dearyoungjo_Whisper-Medicalv1CNB1/MelSpectrogram.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:009d9fb8f6b589accfa08cebf1c712ef07c3405229ce3cfb3a57ee033c9d8a49
+ size 373376
dearyoungjo_Whisper-Medicalv1CNB1/TextDecoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52791d27621455111777a65df133dfd72bf2907e8b6b4986017678ee5e26c78b
+ size 243
dearyoungjo_Whisper-Medicalv1CNB1/TextDecoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8739b30988c13a4f2bcc4a9b2bb35f928f22f7066783d6f75e3fba621ecb95a
+ size 633
dearyoungjo_Whisper-Medicalv1CNB1/TextDecoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,168 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1 × 51866)",
+ "shortDescription" : "",
+ "shape" : "[1, 1, 51866]",
+ "name" : "logits",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 2560 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 2560, 1, 1]",
+ "name" : "key_cache_updates",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 2560 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 2560, 1, 1]",
+ "name" : "value_cache_updates",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1500]",
+ "name" : "alignment_heads_weights",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 7,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios16.linear" : 1,
+ "Concat" : 3,
+ "Ios16.reduceMean" : 1,
+ "Ios16.mul" : 12,
+ "Ios16.layerNorm" : 7,
+ "SliceByIndex" : 40,
+ "Ios16.sub" : 1,
+ "Transpose" : 1,
+ "Ios16.conv" : 20,
+ "Ios16.add" : 13,
+ "Squeeze" : 1,
+ "Ios16.matmul" : 8,
+ "Ios16.softmax" : 4,
+ "Ios16.gelu" : 2,
+ "ExpandDims" : 6,
+ "Ios16.batchNorm" : 7,
+ "Split" : 2,
+ "Ios16.gather" : 2,
+ "Ios16.reshape" : 16
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "13.0",
+ "tvOS" : "16.0",
+ "visionOS" : "1.0",
+ "watchOS" : "9.0",
+ "iOS" : "16.0",
+ "macCatalyst" : "16.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.4.1",
+ "com.github.apple.coremltools.version" : "8.0"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "input_ids",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "cache_length",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 2560 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 2560, 1, 448]",
+ "name" : "key_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 2560 × 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 2560, 1, 448]",
+ "name" : "value_cache",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 448]",
+ "name" : "kv_cache_update_mask",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+ "shortDescription" : "",
+ "shape" : "[1, 1280, 1, 1500]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 448)",
+ "shortDescription" : "",
+ "shape" : "[1, 448]",
+ "name" : "decoder_key_padding_mask",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "TextDecoder",
+ "method" : "predict"
+ }
+ ]
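
The decoder schema describes a single-token, KV-cached decode step: `input_ids` and `cache_length` are scalar Int32 arrays, `key_cache`/`value_cache` hold the per-layer self-attention caches packed along the 2560-wide channel axis for up to 448 positions, and the model returns `logits` over the 51866-token vocabulary plus single-column `key_cache_updates`/`value_cache_updates` and cross-attention `alignment_heads_weights`. A hedged sketch of one host-side decode step follows; the one-hot `kv_cache_update_mask` and the additive, large-negative `decoder_key_padding_mask` conventions are assumptions read off the MIL below, not something documented in this commit.

```python
# Sketch of one cached decode step (assumptions: coremltools >= 7 on macOS,
# repo cloned locally, one-hot update mask and additive padding mask as
# implied by the MIL below -- none of this is documented in the commit itself).
import numpy as np
import coremltools as ct

decoder = ct.models.CompiledMLModel(
    "dearyoungjo_Whisper-Medicalv1CNB1/TextDecoder.mlmodelc"
)

MAX_TOKENS = 448  # cache depth from the input schema
key_cache = np.zeros((1, 2560, 1, MAX_TOKENS), dtype=np.float32)
value_cache = np.zeros_like(key_cache)

def decode_step(token_id: int, position: int, encoder_output_embeds: np.ndarray) -> int:
    update_mask = np.zeros((1, MAX_TOKENS), dtype=np.float32)
    update_mask[0, position] = 1.0                # write current K/V into this slot
    padding_mask = np.full((1, MAX_TOKENS), -1e4, dtype=np.float32)
    padding_mask[0, : position + 1] = 0.0         # attend only to filled slots
    out = decoder.predict({
        "input_ids": np.array([token_id], dtype=np.int32),
        "cache_length": np.array([position], dtype=np.int32),
        "key_cache": key_cache,
        "value_cache": value_cache,
        "kv_cache_update_mask": update_mask,
        "encoder_output_embeds": encoder_output_embeds,  # [1, 1280, 1, 1500] from AudioEncoder
        "decoder_key_padding_mask": padding_mask,
    })
    # Persist the returned single-column updates into the host-side cache.
    key_cache[..., position:position + 1] = out["key_cache_updates"]
    value_cache[..., position:position + 1] = out["value_cache_updates"]
    return int(np.argmax(out["logits"][0, -1]))   # greedy next token
```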
dearyoungjo_Whisper-Medicalv1CNB1/TextDecoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,534 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
3
+ {
4
+ func main<ios16>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, tensor<fp16, [1, 1280, 1, 1500]> encoder_output_embeds, tensor<int32, [1]> input_ids, tensor<fp16, [1, 2560, 1, 448]> key_cache, tensor<fp16, [1, 448]> kv_cache_update_mask, tensor<fp16, [1, 2560, 1, 448]> value_cache) {
5
+ tensor<int32, []> var_20_axis_0 = const()[name = tensor<string, []>("op_20_axis_0"), val = tensor<int32, []>(0)];
6
+ tensor<int32, []> var_20_batch_dims_0 = const()[name = tensor<string, []>("op_20_batch_dims_0"), val = tensor<int32, []>(0)];
7
+ tensor<fp16, [51866, 1280]> embed_tokens_weight_to_fp16 = const()[name = tensor<string, []>("embed_tokens_weight_to_fp16"), val = tensor<fp16, [51866, 1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
8
+ tensor<fp16, [1, 1280]> var_20_cast_fp16 = gather(axis = var_20_axis_0, batch_dims = var_20_batch_dims_0, indices = input_ids, x = embed_tokens_weight_to_fp16)[name = tensor<string, []>("op_20_cast_fp16")];
9
+ tensor<int32, []> var_24_axis_0 = const()[name = tensor<string, []>("op_24_axis_0"), val = tensor<int32, []>(0)];
10
+ tensor<int32, []> var_24_batch_dims_0 = const()[name = tensor<string, []>("op_24_batch_dims_0"), val = tensor<int32, []>(0)];
11
+ tensor<fp16, [448, 1280]> embed_positions_weight_to_fp16 = const()[name = tensor<string, []>("embed_positions_weight_to_fp16"), val = tensor<fp16, [448, 1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(132777088)))];
12
+ tensor<fp16, [1, 1280]> var_24_cast_fp16 = gather(axis = var_24_axis_0, batch_dims = var_24_batch_dims_0, indices = cache_length, x = embed_positions_weight_to_fp16)[name = tensor<string, []>("op_24_cast_fp16")];
13
+ tensor<fp16, [1, 1280]> hidden_states_1_cast_fp16 = add(x = var_20_cast_fp16, y = var_24_cast_fp16)[name = tensor<string, []>("hidden_states_1_cast_fp16")];
14
+ tensor<int32, [1]> var_38_axes_0 = const()[name = tensor<string, []>("op_38_axes_0"), val = tensor<int32, [1]>([2])];
15
+ tensor<fp16, [1, 1280, 1]> var_38_cast_fp16 = expand_dims(axes = var_38_axes_0, x = hidden_states_1_cast_fp16)[name = tensor<string, []>("op_38_cast_fp16")];
16
+ tensor<int32, [1]> inputs_1_axes_0 = const()[name = tensor<string, []>("inputs_1_axes_0"), val = tensor<int32, [1]>([3])];
17
+ tensor<fp16, [1, 1280, 1, 1]> inputs_1_cast_fp16 = expand_dims(axes = inputs_1_axes_0, x = var_38_cast_fp16)[name = tensor<string, []>("inputs_1_cast_fp16")];
18
+ tensor<int32, [2]> tile_0 = const()[name = tensor<string, []>("tile_0"), val = tensor<int32, [2]>([1280, 1280])];
19
+ tensor<int32, []> var_43_axis_0 = const()[name = tensor<string, []>("op_43_axis_0"), val = tensor<int32, []>(1)];
20
+ tensor<fp16, [1, 1280, 1, 448]> var_43_cast_fp16_0, tensor<fp16, [1, 1280, 1, 448]> var_43_cast_fp16_1 = split(axis = var_43_axis_0, split_sizes = tile_0, x = key_cache)[name = tensor<string, []>("op_43_cast_fp16")];
21
+ tensor<int32, [2]> tile_1 = const()[name = tensor<string, []>("tile_1"), val = tensor<int32, [2]>([1280, 1280])];
22
+ tensor<int32, []> var_48_axis_0 = const()[name = tensor<string, []>("op_48_axis_0"), val = tensor<int32, []>(1)];
23
+ tensor<fp16, [1, 1280, 1, 448]> var_48_cast_fp16_0, tensor<fp16, [1, 1280, 1, 448]> var_48_cast_fp16_1 = split(axis = var_48_axis_0, split_sizes = tile_1, x = value_cache)[name = tensor<string, []>("op_48_cast_fp16")];
24
+ tensor<int32, []> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<int32, []>(3)];
25
+ tensor<int32, [1]> out_1_axes_0 = const()[name = tensor<string, []>("out_1_axes_0"), val = tensor<int32, [1]>([1])];
26
+ tensor<fp16, []> var_82_to_fp16 = const()[name = tensor<string, []>("op_82_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
27
+ tensor<fp16, [1, 1280, 1, 1]> out_1_cast_fp16 = layer_norm(axes = out_1_axes_0, epsilon = var_82_to_fp16, x = inputs_1_cast_fp16)[name = tensor<string, []>("out_1_cast_fp16")];
28
+ tensor<fp16, [1280]> obj_1_mean_0_to_fp16 = const()[name = tensor<string, []>("obj_1_mean_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133924032)))];
29
+ tensor<fp16, [1280]> obj_1_variance_0_to_fp16 = const()[name = tensor<string, []>("obj_1_variance_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133926656)))];
30
+ tensor<fp16, [1280]> obj_1_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_1_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133929280)))];
31
+ tensor<fp16, [1280]> obj_1_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_1_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133931904)))];
32
+ tensor<fp16, []> obj_1_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_1_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
33
+ tensor<fp16, [1, 1280, 1, 1]> obj_1_cast_fp16 = batch_norm(beta = obj_1_beta_0_to_fp16, epsilon = obj_1_epsilon_0_to_fp16, gamma = obj_1_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_1_cast_fp16)[name = tensor<string, []>("obj_1_cast_fp16")];
34
+ tensor<string, []> query_1_pad_type_0 = const()[name = tensor<string, []>("query_1_pad_type_0"), val = tensor<string, []>("valid")];
35
+ tensor<int32, [2]> query_1_strides_0 = const()[name = tensor<string, []>("query_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
36
+ tensor<int32, [4]> query_1_pad_0 = const()[name = tensor<string, []>("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
37
+ tensor<int32, [2]> query_1_dilations_0 = const()[name = tensor<string, []>("query_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
38
+ tensor<int32, []> query_1_groups_0 = const()[name = tensor<string, []>("query_1_groups_0"), val = tensor<int32, []>(1)];
39
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(133934528)))];
40
+ tensor<fp16, [1280]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(137211392)))];
41
+ tensor<fp16, [1, 1280, 1, 1]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("query_1_cast_fp16")];
42
+ tensor<string, []> current_key_1_pad_type_0 = const()[name = tensor<string, []>("current_key_1_pad_type_0"), val = tensor<string, []>("valid")];
43
+ tensor<int32, [2]> current_key_1_strides_0 = const()[name = tensor<string, []>("current_key_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
44
+ tensor<int32, [4]> current_key_1_pad_0 = const()[name = tensor<string, []>("current_key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
45
+ tensor<int32, [2]> current_key_1_dilations_0 = const()[name = tensor<string, []>("current_key_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
46
+ tensor<int32, []> current_key_1_groups_0 = const()[name = tensor<string, []>("current_key_1_groups_0"), val = tensor<int32, []>(1)];
47
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(137214016)))];
48
+ tensor<fp16, [1, 1280, 1, 1]> current_key_1_cast_fp16 = conv(dilations = current_key_1_dilations_0, groups = current_key_1_groups_0, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = current_key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("current_key_1_cast_fp16")];
49
+ tensor<string, []> current_value_1_pad_type_0 = const()[name = tensor<string, []>("current_value_1_pad_type_0"), val = tensor<string, []>("valid")];
50
+ tensor<int32, [2]> current_value_1_strides_0 = const()[name = tensor<string, []>("current_value_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
51
+ tensor<int32, [4]> current_value_1_pad_0 = const()[name = tensor<string, []>("current_value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
52
+ tensor<int32, [2]> current_value_1_dilations_0 = const()[name = tensor<string, []>("current_value_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
53
+ tensor<int32, []> current_value_1_groups_0 = const()[name = tensor<string, []>("current_value_1_groups_0"), val = tensor<int32, []>(1)];
54
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(140490880)))];
55
+ tensor<fp16, [1280]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(143767744)))];
56
+ tensor<fp16, [1, 1280, 1, 1]> current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = current_value_1_dilations_0, groups = current_value_1_groups_0, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = current_value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("current_value_1_cast_fp16")];
57
+ tensor<int32, [1]> var_117_axes_0 = const()[name = tensor<string, []>("op_117_axes_0"), val = tensor<int32, [1]>([1])];
58
+ tensor<fp16, [1, 1, 448]> var_117_cast_fp16 = expand_dims(axes = var_117_axes_0, x = kv_cache_update_mask)[name = tensor<string, []>("op_117_cast_fp16")];
59
+ tensor<int32, [1]> var_118_axes_0 = const()[name = tensor<string, []>("op_118_axes_0"), val = tensor<int32, [1]>([2])];
60
+ tensor<fp16, [1, 1, 1, 448]> var_118_cast_fp16 = expand_dims(axes = var_118_axes_0, x = var_117_cast_fp16)[name = tensor<string, []>("op_118_cast_fp16")];
61
+ tensor<fp16, [1, 1280, 1, 448]> var_120_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_120_cast_fp16")];
62
+ tensor<fp16, []> var_57_to_fp16 = const()[name = tensor<string, []>("op_57_to_fp16"), val = tensor<fp16, []>(0x1p+0)];
63
+ tensor<fp16, [1, 1, 1, 448]> var_121_cast_fp16 = sub(x = var_57_to_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")];
64
+ tensor<fp16, [1, 1280, 1, 448]> var_122_cast_fp16 = mul(x = var_43_cast_fp16_0, y = var_121_cast_fp16)[name = tensor<string, []>("op_122_cast_fp16")];
65
+ tensor<fp16, [1, 1280, 1, 448]> key_1_cast_fp16 = add(x = var_120_cast_fp16, y = var_122_cast_fp16)[name = tensor<string, []>("key_1_cast_fp16")];
66
+ tensor<fp16, [1, 1280, 1, 448]> var_124_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_124_cast_fp16")];
67
+ tensor<fp16, [1, 1280, 1, 448]> var_126_cast_fp16 = mul(x = var_48_cast_fp16_0, y = var_121_cast_fp16)[name = tensor<string, []>("op_126_cast_fp16")];
68
+ tensor<fp16, [1, 1280, 1, 448]> value_1_cast_fp16 = add(x = var_124_cast_fp16, y = var_126_cast_fp16)[name = tensor<string, []>("value_1_cast_fp16")];
69
+ tensor<int32, [4]> var_129 = const()[name = tensor<string, []>("op_129"), val = tensor<int32, [4]>([1, 20, 64, -1])];
70
+ tensor<fp16, [1, 20, 64, 1]> mh_q_1_cast_fp16 = reshape(shape = var_129, x = query_1_cast_fp16)[name = tensor<string, []>("mh_q_1_cast_fp16")];
71
+ tensor<fp16, []> var_131_to_fp16 = const()[name = tensor<string, []>("op_131_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
72
+ tensor<fp16, [1, 20, 64, 1]> var_132_cast_fp16 = mul(x = mh_q_1_cast_fp16, y = var_131_to_fp16)[name = tensor<string, []>("op_132_cast_fp16")];
73
+ tensor<int32, [4]> var_133 = const()[name = tensor<string, []>("op_133"), val = tensor<int32, [4]>([1, 20, 64, -1])];
74
+ tensor<fp16, [1, 20, 64, 448]> var_134_cast_fp16 = reshape(shape = var_133, x = key_1_cast_fp16)[name = tensor<string, []>("op_134_cast_fp16")];
75
+ tensor<bool, []> mh_w_1_transpose_x_0 = const()[name = tensor<string, []>("mh_w_1_transpose_x_0"), val = tensor<bool, []>(true)];
76
+ tensor<bool, []> mh_w_1_transpose_y_0 = const()[name = tensor<string, []>("mh_w_1_transpose_y_0"), val = tensor<bool, []>(false)];
77
+ tensor<fp16, [1, 20, 1, 448]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_132_cast_fp16, y = var_134_cast_fp16)[name = tensor<string, []>("mh_w_1_cast_fp16")];
78
+ tensor<int32, [1]> var_138_axes_0 = const()[name = tensor<string, []>("op_138_axes_0"), val = tensor<int32, [1]>([1])];
79
+ tensor<fp16, [1, 1, 448]> var_138_cast_fp16 = expand_dims(axes = var_138_axes_0, x = decoder_key_padding_mask)[name = tensor<string, []>("op_138_cast_fp16")];
80
+ tensor<int32, [1]> var_139_axes_0 = const()[name = tensor<string, []>("op_139_axes_0"), val = tensor<int32, [1]>([2])];
81
+ tensor<fp16, [1, 1, 1, 448]> var_139_cast_fp16 = expand_dims(axes = var_139_axes_0, x = var_138_cast_fp16)[name = tensor<string, []>("op_139_cast_fp16")];
82
+ tensor<fp16, [1, 20, 1, 448]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_139_cast_fp16)[name = tensor<string, []>("mh_w_3_cast_fp16")];
83
+ tensor<fp16, [1, 20, 1, 448]> var_142_cast_fp16 = softmax(axis = var_56, x = mh_w_3_cast_fp16)[name = tensor<string, []>("op_142_cast_fp16")];
84
+ tensor<int32, [4]> var_143 = const()[name = tensor<string, []>("op_143"), val = tensor<int32, [4]>([1, 20, 64, -1])];
85
+ tensor<fp16, [1, 20, 64, 448]> var_144_cast_fp16 = reshape(shape = var_143, x = value_1_cast_fp16)[name = tensor<string, []>("op_144_cast_fp16")];
86
+ tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
87
+ tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
88
+ tensor<fp16, [1, 20, 64, 1]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_144_cast_fp16, y = var_142_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
89
+ tensor<int32, [4]> var_147 = const()[name = tensor<string, []>("op_147"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
90
+ tensor<fp16, [1, 1280, 1, 1]> input_1_cast_fp16 = reshape(shape = var_147, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
91
+ tensor<string, []> obj_7_pad_type_0 = const()[name = tensor<string, []>("obj_7_pad_type_0"), val = tensor<string, []>("valid")];
92
+ tensor<int32, [2]> obj_7_strides_0 = const()[name = tensor<string, []>("obj_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
93
+ tensor<int32, [4]> obj_7_pad_0 = const()[name = tensor<string, []>("obj_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
94
+ tensor<int32, [2]> obj_7_dilations_0 = const()[name = tensor<string, []>("obj_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
95
+ tensor<int32, []> obj_7_groups_0 = const()[name = tensor<string, []>("obj_7_groups_0"), val = tensor<int32, []>(1)];
96
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(143770368)))];
97
+ tensor<fp16, [1280]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147047232)))];
98
+ tensor<fp16, [1, 1280, 1, 1]> obj_7_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_7_dilations_0, groups = obj_7_groups_0, pad = obj_7_pad_0, pad_type = obj_7_pad_type_0, strides = obj_7_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("obj_7_cast_fp16")];
99
+ tensor<fp16, [1, 1280, 1, 1]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_7_cast_fp16)[name = tensor<string, []>("inputs_3_cast_fp16")];
100
+ tensor<int32, [1]> out_3_axes_0 = const()[name = tensor<string, []>("out_3_axes_0"), val = tensor<int32, [1]>([1])];
101
+ tensor<fp16, []> var_169_to_fp16 = const()[name = tensor<string, []>("op_169_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
102
+ tensor<fp16, [1, 1280, 1, 1]> out_3_cast_fp16 = layer_norm(axes = out_3_axes_0, epsilon = var_169_to_fp16, x = inputs_3_cast_fp16)[name = tensor<string, []>("out_3_cast_fp16")];
103
+ tensor<fp16, [1280]> obj_9_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_9_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147049856)))];
104
+ tensor<fp16, [1280]> obj_9_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_9_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147052480)))];
105
+ tensor<fp16, []> obj_9_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_9_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
106
+ tensor<fp16, [1, 1280, 1, 1]> obj_9_cast_fp16 = batch_norm(beta = obj_9_beta_0_to_fp16, epsilon = obj_9_epsilon_0_to_fp16, gamma = obj_9_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_3_cast_fp16)[name = tensor<string, []>("obj_9_cast_fp16")];
107
+ tensor<string, []> query_3_pad_type_0 = const()[name = tensor<string, []>("query_3_pad_type_0"), val = tensor<string, []>("valid")];
108
+ tensor<int32, [2]> query_3_strides_0 = const()[name = tensor<string, []>("query_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
109
+ tensor<int32, [4]> query_3_pad_0 = const()[name = tensor<string, []>("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
110
+ tensor<int32, [2]> query_3_dilations_0 = const()[name = tensor<string, []>("query_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
111
+ tensor<int32, []> query_3_groups_0 = const()[name = tensor<string, []>("query_3_groups_0"), val = tensor<int32, []>(1)];
112
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147055104)))];
113
+ tensor<fp16, [1280]> layers_0_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(150331968)))];
114
+ tensor<fp16, [1, 1280, 1, 1]> query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor<string, []>("query_3_cast_fp16")];
115
+ tensor<string, []> key_3_pad_type_0 = const()[name = tensor<string, []>("key_3_pad_type_0"), val = tensor<string, []>("valid")];
116
+ tensor<int32, [2]> key_3_strides_0 = const()[name = tensor<string, []>("key_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
117
+ tensor<int32, [4]> key_3_pad_0 = const()[name = tensor<string, []>("key_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
118
+ tensor<int32, [2]> key_3_dilations_0 = const()[name = tensor<string, []>("key_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
119
+ tensor<int32, []> key_3_groups_0 = const()[name = tensor<string, []>("key_3_groups_0"), val = tensor<int32, []>(1)];
120
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(150334592)))];
121
+ tensor<fp16, [1, 1280, 1, 1500]> key_3_cast_fp16 = conv(dilations = key_3_dilations_0, groups = key_3_groups_0, pad = key_3_pad_0, pad_type = key_3_pad_type_0, strides = key_3_strides_0, weight = layers_0_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("key_3_cast_fp16")];
122
+ tensor<string, []> value_3_pad_type_0 = const()[name = tensor<string, []>("value_3_pad_type_0"), val = tensor<string, []>("valid")];
123
+ tensor<int32, [2]> value_3_strides_0 = const()[name = tensor<string, []>("value_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
124
+ tensor<int32, [4]> value_3_pad_0 = const()[name = tensor<string, []>("value_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
125
+ tensor<int32, [2]> value_3_dilations_0 = const()[name = tensor<string, []>("value_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
126
+ tensor<int32, []> value_3_groups_0 = const()[name = tensor<string, []>("value_3_groups_0"), val = tensor<int32, []>(1)];
127
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(153611456)))];
128
+ tensor<fp16, [1280]> layers_0_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(156888320)))];
129
+ tensor<fp16, [1, 1280, 1, 1500]> value_3_cast_fp16 = conv(bias = layers_0_encoder_attn_v_proj_bias_to_fp16, dilations = value_3_dilations_0, groups = value_3_groups_0, pad = value_3_pad_0, pad_type = value_3_pad_type_0, strides = value_3_strides_0, weight = layers_0_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_3_cast_fp16")];
130
+ tensor<int32, [4]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [4]>([1, 20, 64, -1])];
131
+ tensor<fp16, [1, 20, 64, 1]> mh_q_3_cast_fp16 = reshape(shape = var_204, x = query_3_cast_fp16)[name = tensor<string, []>("mh_q_3_cast_fp16")];
132
+ tensor<fp16, []> var_206_to_fp16 = const()[name = tensor<string, []>("op_206_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
133
+ tensor<fp16, [1, 20, 64, 1]> var_207_cast_fp16 = mul(x = mh_q_3_cast_fp16, y = var_206_to_fp16)[name = tensor<string, []>("op_207_cast_fp16")];
134
+ tensor<int32, [4]> var_208 = const()[name = tensor<string, []>("op_208"), val = tensor<int32, [4]>([1, 20, 64, -1])];
135
+ tensor<fp16, [1, 20, 64, 1500]> var_209_cast_fp16 = reshape(shape = var_208, x = key_3_cast_fp16)[name = tensor<string, []>("op_209_cast_fp16")];
136
+ tensor<bool, []> mh_w_5_transpose_x_0 = const()[name = tensor<string, []>("mh_w_5_transpose_x_0"), val = tensor<bool, []>(true)];
137
+ tensor<bool, []> mh_w_5_transpose_y_0 = const()[name = tensor<string, []>("mh_w_5_transpose_y_0"), val = tensor<bool, []>(false)];
138
+ tensor<fp16, [1, 20, 1, 1500]> mh_w_5_cast_fp16 = matmul(transpose_x = mh_w_5_transpose_x_0, transpose_y = mh_w_5_transpose_y_0, x = var_207_cast_fp16, y = var_209_cast_fp16)[name = tensor<string, []>("mh_w_5_cast_fp16")];
139
+ tensor<fp16, [1, 20, 1, 1500]> obj_13_cast_fp16 = softmax(axis = var_56, x = mh_w_5_cast_fp16)[name = tensor<string, []>("obj_13_cast_fp16")];
140
+ tensor<int32, [4]> var_213 = const()[name = tensor<string, []>("op_213"), val = tensor<int32, [4]>([1, 20, 64, -1])];
141
+ tensor<fp16, [1, 20, 64, 1500]> var_214_cast_fp16 = reshape(shape = var_213, x = value_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")];
142
+ tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
143
+ tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
144
+ tensor<fp16, [1, 20, 64, 1]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_214_cast_fp16, y = obj_13_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
145
+ tensor<int32, [4]> var_217 = const()[name = tensor<string, []>("op_217"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
146
+ tensor<fp16, [1, 1280, 1, 1]> input_3_cast_fp16 = reshape(shape = var_217, x = attn_3_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
147
+ tensor<string, []> obj_11_pad_type_0 = const()[name = tensor<string, []>("obj_11_pad_type_0"), val = tensor<string, []>("valid")];
148
+ tensor<int32, [2]> obj_11_strides_0 = const()[name = tensor<string, []>("obj_11_strides_0"), val = tensor<int32, [2]>([1, 1])];
149
+ tensor<int32, [4]> obj_11_pad_0 = const()[name = tensor<string, []>("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
150
+ tensor<int32, [2]> obj_11_dilations_0 = const()[name = tensor<string, []>("obj_11_dilations_0"), val = tensor<int32, [2]>([1, 1])];
151
+ tensor<int32, []> obj_11_groups_0 = const()[name = tensor<string, []>("obj_11_groups_0"), val = tensor<int32, []>(1)];
152
+ tensor<fp16, [1280, 1280, 1, 1]> layers_0_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(156890944)))];
153
+ tensor<fp16, [1280]> layers_0_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160167808)))];
154
+ tensor<fp16, [1, 1280, 1, 1]> obj_11_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("obj_11_cast_fp16")];
155
+ tensor<fp16, [1, 1280, 1, 1]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = obj_11_cast_fp16)[name = tensor<string, []>("inputs_5_cast_fp16")];
156
+ tensor<int32, [1]> out_5_axes_0 = const()[name = tensor<string, []>("out_5_axes_0"), val = tensor<int32, [1]>([1])];
157
+ tensor<fp16, []> var_235_to_fp16 = const()[name = tensor<string, []>("op_235_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
158
+ tensor<fp16, [1, 1280, 1, 1]> out_5_cast_fp16 = layer_norm(axes = out_5_axes_0, epsilon = var_235_to_fp16, x = inputs_5_cast_fp16)[name = tensor<string, []>("out_5_cast_fp16")];
159
+ tensor<fp16, [1280]> input_5_gamma_0_to_fp16 = const()[name = tensor<string, []>("input_5_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160170432)))];
160
+ tensor<fp16, [1280]> input_5_beta_0_to_fp16 = const()[name = tensor<string, []>("input_5_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160173056)))];
161
+ tensor<fp16, []> input_5_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_5_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
162
+ tensor<fp16, [1, 1280, 1, 1]> input_5_cast_fp16 = batch_norm(beta = input_5_beta_0_to_fp16, epsilon = input_5_epsilon_0_to_fp16, gamma = input_5_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_5_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
163
+ tensor<string, []> input_7_pad_type_0 = const()[name = tensor<string, []>("input_7_pad_type_0"), val = tensor<string, []>("valid")];
164
+ tensor<int32, [2]> input_7_strides_0 = const()[name = tensor<string, []>("input_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
165
+ tensor<int32, [4]> input_7_pad_0 = const()[name = tensor<string, []>("input_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
166
+ tensor<int32, [2]> input_7_dilations_0 = const()[name = tensor<string, []>("input_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
167
+ tensor<int32, []> input_7_groups_0 = const()[name = tensor<string, []>("input_7_groups_0"), val = tensor<int32, []>(1)];
168
+ tensor<fp16, [5120, 1280, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [5120, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(160175680)))];
169
+ tensor<fp16, [5120]> layers_0_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [5120]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(173282944)))];
170
+ tensor<fp16, [1, 5120, 1, 1]> input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
171
+ tensor<string, []> input_9_mode_0 = const()[name = tensor<string, []>("input_9_mode_0"), val = tensor<string, []>("EXACT")];
172
+ tensor<fp16, [1, 5120, 1, 1]> input_9_cast_fp16 = gelu(mode = input_9_mode_0, x = input_7_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
173
+ tensor<string, []> hidden_states_3_pad_type_0 = const()[name = tensor<string, []>("hidden_states_3_pad_type_0"), val = tensor<string, []>("valid")];
174
+ tensor<int32, [2]> hidden_states_3_strides_0 = const()[name = tensor<string, []>("hidden_states_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
175
+ tensor<int32, [4]> hidden_states_3_pad_0 = const()[name = tensor<string, []>("hidden_states_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
176
+ tensor<int32, [2]> hidden_states_3_dilations_0 = const()[name = tensor<string, []>("hidden_states_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
177
+ tensor<int32, []> hidden_states_3_groups_0 = const()[name = tensor<string, []>("hidden_states_3_groups_0"), val = tensor<int32, []>(1)];
178
+ tensor<fp16, [1280, 5120, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [1280, 5120, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(173293248)))];
179
+ tensor<fp16, [1280]> layers_0_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(186400512)))];
180
+ tensor<fp16, [1, 1280, 1, 1]> hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_3_dilations_0, groups = hidden_states_3_groups_0, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = hidden_states_3_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("hidden_states_3_cast_fp16")];
181
+ tensor<fp16, [1, 1280, 1, 1]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = hidden_states_3_cast_fp16)[name = tensor<string, []>("inputs_7_cast_fp16")];
182
+ tensor<int32, []> var_270 = const()[name = tensor<string, []>("op_270"), val = tensor<int32, []>(3)];
183
+ tensor<int32, [1]> out_7_axes_0 = const()[name = tensor<string, []>("out_7_axes_0"), val = tensor<int32, [1]>([1])];
184
+ tensor<fp16, []> var_296_to_fp16 = const()[name = tensor<string, []>("op_296_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
185
+ tensor<fp16, [1, 1280, 1, 1]> out_7_cast_fp16 = layer_norm(axes = out_7_axes_0, epsilon = var_296_to_fp16, x = inputs_7_cast_fp16)[name = tensor<string, []>("out_7_cast_fp16")];
186
+ tensor<fp16, [1280]> obj_15_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_15_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(186403136)))];
187
+ tensor<fp16, [1280]> obj_15_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_15_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(186405760)))];
188
+ tensor<fp16, []> obj_15_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_15_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
189
+ tensor<fp16, [1, 1280, 1, 1]> obj_15_cast_fp16 = batch_norm(beta = obj_15_beta_0_to_fp16, epsilon = obj_15_epsilon_0_to_fp16, gamma = obj_15_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_7_cast_fp16)[name = tensor<string, []>("obj_15_cast_fp16")];
190
+ tensor<string, []> query_5_pad_type_0 = const()[name = tensor<string, []>("query_5_pad_type_0"), val = tensor<string, []>("valid")];
191
+ tensor<int32, [2]> query_5_strides_0 = const()[name = tensor<string, []>("query_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
192
+ tensor<int32, [4]> query_5_pad_0 = const()[name = tensor<string, []>("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
193
+ tensor<int32, [2]> query_5_dilations_0 = const()[name = tensor<string, []>("query_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
194
+ tensor<int32, []> query_5_groups_0 = const()[name = tensor<string, []>("query_5_groups_0"), val = tensor<int32, []>(1)];
195
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(186408384)))];
196
+ tensor<fp16, [1280]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(189685248)))];
197
+ tensor<fp16, [1, 1280, 1, 1]> query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("query_5_cast_fp16")];
198
+ tensor<string, []> current_key_pad_type_0 = const()[name = tensor<string, []>("current_key_pad_type_0"), val = tensor<string, []>("valid")];
199
+ tensor<int32, [2]> current_key_strides_0 = const()[name = tensor<string, []>("current_key_strides_0"), val = tensor<int32, [2]>([1, 1])];
200
+ tensor<int32, [4]> current_key_pad_0 = const()[name = tensor<string, []>("current_key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
201
+ tensor<int32, [2]> current_key_dilations_0 = const()[name = tensor<string, []>("current_key_dilations_0"), val = tensor<int32, [2]>([1, 1])];
202
+ tensor<int32, []> current_key_groups_0 = const()[name = tensor<string, []>("current_key_groups_0"), val = tensor<int32, []>(1)];
203
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(189687872)))];
204
+ tensor<fp16, [1, 1280, 1, 1]> current_key_cast_fp16 = conv(dilations = current_key_dilations_0, groups = current_key_groups_0, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = current_key_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("current_key_cast_fp16")];
205
+ tensor<string, []> current_value_pad_type_0 = const()[name = tensor<string, []>("current_value_pad_type_0"), val = tensor<string, []>("valid")];
206
+ tensor<int32, [2]> current_value_strides_0 = const()[name = tensor<string, []>("current_value_strides_0"), val = tensor<int32, [2]>([1, 1])];
207
+ tensor<int32, [4]> current_value_pad_0 = const()[name = tensor<string, []>("current_value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
208
+ tensor<int32, [2]> current_value_dilations_0 = const()[name = tensor<string, []>("current_value_dilations_0"), val = tensor<int32, [2]>([1, 1])];
209
+ tensor<int32, []> current_value_groups_0 = const()[name = tensor<string, []>("current_value_groups_0"), val = tensor<int32, []>(1)];
210
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(192964736)))];
211
+ tensor<fp16, [1280]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(196241600)))];
212
+ tensor<fp16, [1, 1280, 1, 1]> current_value_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = current_value_dilations_0, groups = current_value_groups_0, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = current_value_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_15_cast_fp16)[name = tensor<string, []>("current_value_cast_fp16")];
213
+ tensor<fp16, [1, 1280, 1, 448]> var_334_cast_fp16 = mul(x = current_key_cast_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_334_cast_fp16")];
214
+ tensor<fp16, [1, 1280, 1, 448]> var_336_cast_fp16 = mul(x = var_43_cast_fp16_1, y = var_121_cast_fp16)[name = tensor<string, []>("op_336_cast_fp16")];
215
+ tensor<fp16, [1, 1280, 1, 448]> key_5_cast_fp16 = add(x = var_334_cast_fp16, y = var_336_cast_fp16)[name = tensor<string, []>("key_5_cast_fp16")];
216
+ tensor<fp16, [1, 1280, 1, 448]> var_338_cast_fp16 = mul(x = current_value_cast_fp16, y = var_118_cast_fp16)[name = tensor<string, []>("op_338_cast_fp16")];
217
+ tensor<fp16, [1, 1280, 1, 448]> var_340_cast_fp16 = mul(x = var_48_cast_fp16_1, y = var_121_cast_fp16)[name = tensor<string, []>("op_340_cast_fp16")];
218
+ tensor<fp16, [1, 1280, 1, 448]> value_5_cast_fp16 = add(x = var_338_cast_fp16, y = var_340_cast_fp16)[name = tensor<string, []>("value_5_cast_fp16")];
219
+ tensor<int32, [4]> var_343 = const()[name = tensor<string, []>("op_343"), val = tensor<int32, [4]>([1, 20, 64, -1])];
220
+ tensor<fp16, [1, 20, 64, 1]> mh_q_5_cast_fp16 = reshape(shape = var_343, x = query_5_cast_fp16)[name = tensor<string, []>("mh_q_5_cast_fp16")];
221
+ tensor<fp16, []> var_345_to_fp16 = const()[name = tensor<string, []>("op_345_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
222
+ tensor<fp16, [1, 20, 64, 1]> var_346_cast_fp16 = mul(x = mh_q_5_cast_fp16, y = var_345_to_fp16)[name = tensor<string, []>("op_346_cast_fp16")];
223
+ tensor<int32, [4]> var_347 = const()[name = tensor<string, []>("op_347"), val = tensor<int32, [4]>([1, 20, 64, -1])];
224
+ tensor<fp16, [1, 20, 64, 448]> var_348_cast_fp16 = reshape(shape = var_347, x = key_5_cast_fp16)[name = tensor<string, []>("op_348_cast_fp16")];
225
+ tensor<bool, []> mh_w_7_transpose_x_0 = const()[name = tensor<string, []>("mh_w_7_transpose_x_0"), val = tensor<bool, []>(true)];
226
+ tensor<bool, []> mh_w_7_transpose_y_0 = const()[name = tensor<string, []>("mh_w_7_transpose_y_0"), val = tensor<bool, []>(false)];
227
+ tensor<fp16, [1, 20, 1, 448]> mh_w_7_cast_fp16 = matmul(transpose_x = mh_w_7_transpose_x_0, transpose_y = mh_w_7_transpose_y_0, x = var_346_cast_fp16, y = var_348_cast_fp16)[name = tensor<string, []>("mh_w_7_cast_fp16")];
228
+ tensor<fp16, [1, 20, 1, 448]> mh_w_9_cast_fp16 = add(x = mh_w_7_cast_fp16, y = var_139_cast_fp16)[name = tensor<string, []>("mh_w_9_cast_fp16")];
229
+ tensor<fp16, [1, 20, 1, 448]> var_356_cast_fp16 = softmax(axis = var_270, x = mh_w_9_cast_fp16)[name = tensor<string, []>("op_356_cast_fp16")];
230
+ tensor<int32, [4]> var_357 = const()[name = tensor<string, []>("op_357"), val = tensor<int32, [4]>([1, 20, 64, -1])];
231
+ tensor<fp16, [1, 20, 64, 448]> var_358_cast_fp16 = reshape(shape = var_357, x = value_5_cast_fp16)[name = tensor<string, []>("op_358_cast_fp16")];
232
+ tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
233
+ tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
234
+ tensor<fp16, [1, 20, 64, 1]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_358_cast_fp16, y = var_356_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
235
+ tensor<int32, [4]> var_361 = const()[name = tensor<string, []>("op_361"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
236
+ tensor<fp16, [1, 1280, 1, 1]> input_11_cast_fp16 = reshape(shape = var_361, x = attn_5_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
237
+ tensor<string, []> obj_21_pad_type_0 = const()[name = tensor<string, []>("obj_21_pad_type_0"), val = tensor<string, []>("valid")];
238
+ tensor<int32, [2]> obj_21_strides_0 = const()[name = tensor<string, []>("obj_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
239
+ tensor<int32, [4]> obj_21_pad_0 = const()[name = tensor<string, []>("obj_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
240
+ tensor<int32, [2]> obj_21_dilations_0 = const()[name = tensor<string, []>("obj_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
241
+ tensor<int32, []> obj_21_groups_0 = const()[name = tensor<string, []>("obj_21_groups_0"), val = tensor<int32, []>(1)];
242
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(196244224)))];
243
+ tensor<fp16, [1280]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(199521088)))];
244
+ tensor<fp16, [1, 1280, 1, 1]> obj_21_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_21_dilations_0, groups = obj_21_groups_0, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = obj_21_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("obj_21_cast_fp16")];
245
+ tensor<fp16, [1, 1280, 1, 1]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = obj_21_cast_fp16)[name = tensor<string, []>("inputs_9_cast_fp16")];
246
+ tensor<int32, [1]> out_9_axes_0 = const()[name = tensor<string, []>("out_9_axes_0"), val = tensor<int32, [1]>([1])];
247
+ tensor<fp16, []> var_383_to_fp16 = const()[name = tensor<string, []>("op_383_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
248
+ tensor<fp16, [1, 1280, 1, 1]> out_9_cast_fp16 = layer_norm(axes = out_9_axes_0, epsilon = var_383_to_fp16, x = inputs_9_cast_fp16)[name = tensor<string, []>("out_9_cast_fp16")];
249
+ tensor<fp16, [1280]> obj_23_gamma_0_to_fp16 = const()[name = tensor<string, []>("obj_23_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(199523712)))];
250
+ tensor<fp16, [1280]> obj_23_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_23_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(199526336)))];
251
+ tensor<fp16, []> obj_23_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_23_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
252
+ tensor<fp16, [1, 1280, 1, 1]> obj_23_cast_fp16 = batch_norm(beta = obj_23_beta_0_to_fp16, epsilon = obj_23_epsilon_0_to_fp16, gamma = obj_23_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_9_cast_fp16)[name = tensor<string, []>("obj_23_cast_fp16")];
253
+ tensor<string, []> query_pad_type_0 = const()[name = tensor<string, []>("query_pad_type_0"), val = tensor<string, []>("valid")];
254
+ tensor<int32, [2]> query_strides_0 = const()[name = tensor<string, []>("query_strides_0"), val = tensor<int32, [2]>([1, 1])];
255
+ tensor<int32, [4]> query_pad_0 = const()[name = tensor<string, []>("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
256
+ tensor<int32, [2]> query_dilations_0 = const()[name = tensor<string, []>("query_dilations_0"), val = tensor<int32, [2]>([1, 1])];
257
+ tensor<int32, []> query_groups_0 = const()[name = tensor<string, []>("query_groups_0"), val = tensor<int32, []>(1)];
258
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(199528960)))];
259
+ tensor<fp16, [1280]> layers_1_encoder_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202805824)))];
260
+ tensor<fp16, [1, 1280, 1, 1]> query_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_23_cast_fp16)[name = tensor<string, []>("query_cast_fp16")];
261
+ tensor<string, []> key_pad_type_0 = const()[name = tensor<string, []>("key_pad_type_0"), val = tensor<string, []>("valid")];
262
+ tensor<int32, [2]> key_strides_0 = const()[name = tensor<string, []>("key_strides_0"), val = tensor<int32, [2]>([1, 1])];
263
+ tensor<int32, [4]> key_pad_0 = const()[name = tensor<string, []>("key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
264
+ tensor<int32, [2]> key_dilations_0 = const()[name = tensor<string, []>("key_dilations_0"), val = tensor<int32, [2]>([1, 1])];
265
+ tensor<int32, []> key_groups_0 = const()[name = tensor<string, []>("key_groups_0"), val = tensor<int32, []>(1)];
266
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202808448)))];
267
+ tensor<fp16, [1, 1280, 1, 1500]> key_cast_fp16 = conv(dilations = key_dilations_0, groups = key_groups_0, pad = key_pad_0, pad_type = key_pad_type_0, strides = key_strides_0, weight = layers_1_encoder_attn_k_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("key_cast_fp16")];
268
+ tensor<string, []> value_pad_type_0 = const()[name = tensor<string, []>("value_pad_type_0"), val = tensor<string, []>("valid")];
269
+ tensor<int32, [2]> value_strides_0 = const()[name = tensor<string, []>("value_strides_0"), val = tensor<int32, [2]>([1, 1])];
270
+ tensor<int32, [4]> value_pad_0 = const()[name = tensor<string, []>("value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
271
+ tensor<int32, [2]> value_dilations_0 = const()[name = tensor<string, []>("value_dilations_0"), val = tensor<int32, [2]>([1, 1])];
272
+ tensor<int32, []> value_groups_0 = const()[name = tensor<string, []>("value_groups_0"), val = tensor<int32, []>(1)];
273
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(206085312)))];
274
+ tensor<fp16, [1280]> layers_1_encoder_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(209362176)))];
275
+ tensor<fp16, [1, 1280, 1, 1500]> value_cast_fp16 = conv(bias = layers_1_encoder_attn_v_proj_bias_to_fp16, dilations = value_dilations_0, groups = value_groups_0, pad = value_pad_0, pad_type = value_pad_type_0, strides = value_strides_0, weight = layers_1_encoder_attn_v_proj_weight_to_fp16, x = encoder_output_embeds)[name = tensor<string, []>("value_cast_fp16")];
276
+ tensor<int32, [4]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [4]>([1, 20, 64, -1])];
277
+ tensor<fp16, [1, 20, 64, 1]> mh_q_cast_fp16 = reshape(shape = var_418, x = query_cast_fp16)[name = tensor<string, []>("mh_q_cast_fp16")];
278
+ tensor<fp16, []> var_420_to_fp16 = const()[name = tensor<string, []>("op_420_to_fp16"), val = tensor<fp16, []>(0x1p-3)];
279
+ tensor<fp16, [1, 20, 64, 1]> var_421_cast_fp16 = mul(x = mh_q_cast_fp16, y = var_420_to_fp16)[name = tensor<string, []>("op_421_cast_fp16")];
280
+ tensor<int32, [4]> var_422 = const()[name = tensor<string, []>("op_422"), val = tensor<int32, [4]>([1, 20, 64, -1])];
281
+ tensor<fp16, [1, 20, 64, 1500]> var_423_cast_fp16 = reshape(shape = var_422, x = key_cast_fp16)[name = tensor<string, []>("op_423_cast_fp16")];
282
+ tensor<bool, []> mh_w_transpose_x_0 = const()[name = tensor<string, []>("mh_w_transpose_x_0"), val = tensor<bool, []>(true)];
283
+ tensor<bool, []> mh_w_transpose_y_0 = const()[name = tensor<string, []>("mh_w_transpose_y_0"), val = tensor<bool, []>(false)];
284
+ tensor<fp16, [1, 20, 1, 1500]> mh_w_cast_fp16 = matmul(transpose_x = mh_w_transpose_x_0, transpose_y = mh_w_transpose_y_0, x = var_421_cast_fp16, y = var_423_cast_fp16)[name = tensor<string, []>("mh_w_cast_fp16")];
285
+ tensor<fp16, [1, 20, 1, 1500]> obj_27_cast_fp16 = softmax(axis = var_270, x = mh_w_cast_fp16)[name = tensor<string, []>("obj_27_cast_fp16")];
286
+ tensor<int32, [4]> var_427 = const()[name = tensor<string, []>("op_427"), val = tensor<int32, [4]>([1, 20, 64, -1])];
287
+ tensor<fp16, [1, 20, 64, 1500]> var_428_cast_fp16 = reshape(shape = var_427, x = value_cast_fp16)[name = tensor<string, []>("op_428_cast_fp16")];
288
+ tensor<bool, []> attn_transpose_x_0 = const()[name = tensor<string, []>("attn_transpose_x_0"), val = tensor<bool, []>(false)];
289
+ tensor<bool, []> attn_transpose_y_0 = const()[name = tensor<string, []>("attn_transpose_y_0"), val = tensor<bool, []>(true)];
290
+ tensor<fp16, [1, 20, 64, 1]> attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_428_cast_fp16, y = obj_27_cast_fp16)[name = tensor<string, []>("attn_cast_fp16")];
291
+ tensor<int32, [4]> var_431 = const()[name = tensor<string, []>("op_431"), val = tensor<int32, [4]>([1, 1280, 1, -1])];
292
+ tensor<fp16, [1, 1280, 1, 1]> input_13_cast_fp16 = reshape(shape = var_431, x = attn_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
293
+ tensor<string, []> obj_25_pad_type_0 = const()[name = tensor<string, []>("obj_25_pad_type_0"), val = tensor<string, []>("valid")];
294
+ tensor<int32, [2]> obj_25_strides_0 = const()[name = tensor<string, []>("obj_25_strides_0"), val = tensor<int32, [2]>([1, 1])];
295
+ tensor<int32, [4]> obj_25_pad_0 = const()[name = tensor<string, []>("obj_25_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
296
+ tensor<int32, [2]> obj_25_dilations_0 = const()[name = tensor<string, []>("obj_25_dilations_0"), val = tensor<int32, [2]>([1, 1])];
297
+ tensor<int32, []> obj_25_groups_0 = const()[name = tensor<string, []>("obj_25_groups_0"), val = tensor<int32, []>(1)];
298
+ tensor<fp16, [1280, 1280, 1, 1]> layers_1_encoder_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [1280, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(209364800)))];
299
+ tensor<fp16, [1280]> layers_1_encoder_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(212641664)))];
300
+ tensor<fp16, [1, 1280, 1, 1]> obj_25_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = obj_25_dilations_0, groups = obj_25_groups_0, pad = obj_25_pad_0, pad_type = obj_25_pad_type_0, strides = obj_25_strides_0, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = tensor<string, []>("obj_25_cast_fp16")];
301
+ tensor<fp16, [1, 1280, 1, 1]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_25_cast_fp16)[name = tensor<string, []>("inputs_11_cast_fp16")];
302
+ tensor<int32, [1]> out_11_axes_0 = const()[name = tensor<string, []>("out_11_axes_0"), val = tensor<int32, [1]>([1])];
303
+ tensor<fp16, []> var_452_to_fp16 = const()[name = tensor<string, []>("op_452_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
304
+ tensor<fp16, [1, 1280, 1, 1]> out_11_cast_fp16 = layer_norm(axes = out_11_axes_0, epsilon = var_452_to_fp16, x = inputs_11_cast_fp16)[name = tensor<string, []>("out_11_cast_fp16")];
305
+ tensor<fp16, [1280]> input_15_gamma_0_to_fp16 = const()[name = tensor<string, []>("input_15_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(212644288)))];
306
+ tensor<fp16, [1280]> input_15_beta_0_to_fp16 = const()[name = tensor<string, []>("input_15_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(212646912)))];
307
+ tensor<fp16, []> input_15_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_15_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
308
+ tensor<fp16, [1, 1280, 1, 1]> input_15_cast_fp16 = batch_norm(beta = input_15_beta_0_to_fp16, epsilon = input_15_epsilon_0_to_fp16, gamma = input_15_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_11_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
309
+ tensor<string, []> input_17_pad_type_0 = const()[name = tensor<string, []>("input_17_pad_type_0"), val = tensor<string, []>("valid")];
310
+ tensor<int32, [2]> input_17_strides_0 = const()[name = tensor<string, []>("input_17_strides_0"), val = tensor<int32, [2]>([1, 1])];
311
+ tensor<int32, [4]> input_17_pad_0 = const()[name = tensor<string, []>("input_17_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
312
+ tensor<int32, [2]> input_17_dilations_0 = const()[name = tensor<string, []>("input_17_dilations_0"), val = tensor<int32, [2]>([1, 1])];
313
+ tensor<int32, []> input_17_groups_0 = const()[name = tensor<string, []>("input_17_groups_0"), val = tensor<int32, []>(1)];
314
+ tensor<fp16, [5120, 1280, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [5120, 1280, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(212649536)))];
315
+ tensor<fp16, [5120]> layers_1_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [5120]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(225756800)))];
316
+ tensor<fp16, [1, 5120, 1, 1]> input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
317
+ tensor<string, []> input_mode_0 = const()[name = tensor<string, []>("input_mode_0"), val = tensor<string, []>("EXACT")];
318
+ tensor<fp16, [1, 5120, 1, 1]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_17_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
319
+ tensor<string, []> hidden_states_5_pad_type_0 = const()[name = tensor<string, []>("hidden_states_5_pad_type_0"), val = tensor<string, []>("valid")];
320
+ tensor<int32, [2]> hidden_states_5_strides_0 = const()[name = tensor<string, []>("hidden_states_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
321
+ tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = tensor<string, []>("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
322
+ tensor<int32, [2]> hidden_states_5_dilations_0 = const()[name = tensor<string, []>("hidden_states_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
323
+ tensor<int32, []> hidden_states_5_groups_0 = const()[name = tensor<string, []>("hidden_states_5_groups_0"), val = tensor<int32, []>(1)];
324
+ tensor<fp16, [1280, 5120, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [1280, 5120, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(225767104)))];
325
+ tensor<fp16, [1280]> layers_1_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(238874368)))];
326
+ tensor<fp16, [1, 1280, 1, 1]> hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_cast_fp16)[name = tensor<string, []>("hidden_states_5_cast_fp16")];
327
+ tensor<fp16, [1, 1280, 1, 1]> inputs_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_5_cast_fp16)[name = tensor<string, []>("inputs_cast_fp16")];
328
+ tensor<int32, [1]> out_axes_0 = const()[name = tensor<string, []>("out_axes_0"), val = tensor<int32, [1]>([1])];
329
+ tensor<fp16, []> var_495_to_fp16 = const()[name = tensor<string, []>("op_495_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
330
+ tensor<fp16, [1, 1280, 1, 1]> out_cast_fp16 = layer_norm(axes = out_axes_0, epsilon = var_495_to_fp16, x = inputs_cast_fp16)[name = tensor<string, []>("out_cast_fp16")];
331
+ tensor<fp16, [1280]> hidden_states_gamma_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_gamma_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(238876992)))];
332
+ tensor<fp16, [1280]> hidden_states_beta_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_beta_0_to_fp16"), val = tensor<fp16, [1280]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(238879616)))];
333
+ tensor<fp16, []> hidden_states_epsilon_0_to_fp16 = const()[name = tensor<string, []>("hidden_states_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
334
+ tensor<fp16, [1, 1280, 1, 1]> hidden_states_cast_fp16 = batch_norm(beta = hidden_states_beta_0_to_fp16, epsilon = hidden_states_epsilon_0_to_fp16, gamma = hidden_states_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_cast_fp16)[name = tensor<string, []>("hidden_states_cast_fp16")];
335
+ tensor<int32, [1]> var_506_axes_0 = const()[name = tensor<string, []>("op_506_axes_0"), val = tensor<int32, [1]>([2])];
336
+ tensor<fp16, [1, 1280, 1]> var_506_cast_fp16 = squeeze(axes = var_506_axes_0, x = hidden_states_cast_fp16)[name = tensor<string, []>("op_506_cast_fp16")];
337
+ tensor<int32, [3]> var_509_perm_0 = const()[name = tensor<string, []>("op_509_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
338
+ tensor<fp16, [51866]> linear_0_bias_0_to_fp16 = const()[name = tensor<string, []>("linear_0_bias_0_to_fp16"), val = tensor<fp16, [51866]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(238882240)))];
339
+ tensor<fp16, [1, 1, 1280]> var_509_cast_fp16 = transpose(perm = var_509_perm_0, x = var_506_cast_fp16)[name = tensor<string, []>("transpose_0")];
340
+ tensor<fp16, [1, 1, 51866]> logits = linear(bias = linear_0_bias_0_to_fp16, weight = embed_tokens_weight_to_fp16, x = var_509_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
341
+ tensor<int32, []> var_513 = const()[name = tensor<string, []>("op_513"), val = tensor<int32, []>(1)];
342
+ tensor<bool, []> obj_31_interleave_0 = const()[name = tensor<string, []>("obj_31_interleave_0"), val = tensor<bool, []>(false)];
343
+ tensor<fp16, [1, 2560, 1, 1]> key_cache_updates = concat(axis = var_513, interleave = obj_31_interleave_0, values = (current_key_1_cast_fp16, current_key_cast_fp16))[name = tensor<string, []>("obj_31_cast_fp16")];
344
+ tensor<int32, []> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, []>(1)];
345
+ tensor<bool, []> obj_33_interleave_0 = const()[name = tensor<string, []>("obj_33_interleave_0"), val = tensor<bool, []>(false)];
346
+ tensor<fp16, [1, 2560, 1, 1]> value_cache_updates = concat(axis = var_516, interleave = obj_33_interleave_0, values = (current_value_1_cast_fp16, current_value_cast_fp16))[name = tensor<string, []>("obj_33_cast_fp16")];
347
+ tensor<int32, [4]> var_527_begin_0 = const()[name = tensor<string, []>("op_527_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
348
+ tensor<int32, [4]> var_527_end_0 = const()[name = tensor<string, []>("op_527_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
349
+ tensor<bool, [4]> var_527_end_mask_0 = const()[name = tensor<string, []>("op_527_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
350
+ tensor<fp16, [1, 1, 1, 1500]> var_527_cast_fp16 = slice_by_index(begin = var_527_begin_0, end = var_527_end_0, end_mask = var_527_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_527_cast_fp16")];
351
+ tensor<int32, [4]> var_530_begin_0 = const()[name = tensor<string, []>("op_530_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
352
+ tensor<int32, [4]> var_530_end_0 = const()[name = tensor<string, []>("op_530_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
353
+ tensor<bool, [4]> var_530_end_mask_0 = const()[name = tensor<string, []>("op_530_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
354
+ tensor<bool, [4]> var_530_squeeze_mask_0 = const()[name = tensor<string, []>("op_530_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
355
+ tensor<fp16, [1, 1, 1500]> var_530_cast_fp16 = slice_by_index(begin = var_530_begin_0, end = var_530_end_0, end_mask = var_530_end_mask_0, squeeze_mask = var_530_squeeze_mask_0, x = var_527_cast_fp16)[name = tensor<string, []>("op_530_cast_fp16")];
356
+ tensor<int32, [4]> var_545_begin_0 = const()[name = tensor<string, []>("op_545_begin_0"), val = tensor<int32, [4]>([0, 1, 0, 0])];
357
+ tensor<int32, [4]> var_545_end_0 = const()[name = tensor<string, []>("op_545_end_0"), val = tensor<int32, [4]>([1, 2, 1, 1500])];
358
+ tensor<bool, [4]> var_545_end_mask_0 = const()[name = tensor<string, []>("op_545_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
359
+ tensor<fp16, [1, 1, 1, 1500]> var_545_cast_fp16 = slice_by_index(begin = var_545_begin_0, end = var_545_end_0, end_mask = var_545_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_545_cast_fp16")];
360
+ tensor<int32, [4]> var_548_begin_0 = const()[name = tensor<string, []>("op_548_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
361
+ tensor<int32, [4]> var_548_end_0 = const()[name = tensor<string, []>("op_548_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
362
+ tensor<bool, [4]> var_548_end_mask_0 = const()[name = tensor<string, []>("op_548_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
363
+ tensor<bool, [4]> var_548_squeeze_mask_0 = const()[name = tensor<string, []>("op_548_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
364
+ tensor<fp16, [1, 1, 1500]> var_548_cast_fp16 = slice_by_index(begin = var_548_begin_0, end = var_548_end_0, end_mask = var_548_end_mask_0, squeeze_mask = var_548_squeeze_mask_0, x = var_545_cast_fp16)[name = tensor<string, []>("op_548_cast_fp16")];
365
+ tensor<int32, [4]> var_563_begin_0 = const()[name = tensor<string, []>("op_563_begin_0"), val = tensor<int32, [4]>([0, 2, 0, 0])];
366
+ tensor<int32, [4]> var_563_end_0 = const()[name = tensor<string, []>("op_563_end_0"), val = tensor<int32, [4]>([1, 3, 1, 1500])];
367
+ tensor<bool, [4]> var_563_end_mask_0 = const()[name = tensor<string, []>("op_563_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
368
+ tensor<fp16, [1, 1, 1, 1500]> var_563_cast_fp16 = slice_by_index(begin = var_563_begin_0, end = var_563_end_0, end_mask = var_563_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_563_cast_fp16")];
369
+ tensor<int32, [4]> var_566_begin_0 = const()[name = tensor<string, []>("op_566_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
370
+ tensor<int32, [4]> var_566_end_0 = const()[name = tensor<string, []>("op_566_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
371
+ tensor<bool, [4]> var_566_end_mask_0 = const()[name = tensor<string, []>("op_566_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
372
+ tensor<bool, [4]> var_566_squeeze_mask_0 = const()[name = tensor<string, []>("op_566_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
373
+ tensor<fp16, [1, 1, 1500]> var_566_cast_fp16 = slice_by_index(begin = var_566_begin_0, end = var_566_end_0, end_mask = var_566_end_mask_0, squeeze_mask = var_566_squeeze_mask_0, x = var_563_cast_fp16)[name = tensor<string, []>("op_566_cast_fp16")];
374
+ tensor<int32, [4]> var_581_begin_0 = const()[name = tensor<string, []>("op_581_begin_0"), val = tensor<int32, [4]>([0, 3, 0, 0])];
375
+ tensor<int32, [4]> var_581_end_0 = const()[name = tensor<string, []>("op_581_end_0"), val = tensor<int32, [4]>([1, 4, 1, 1500])];
376
+ tensor<bool, [4]> var_581_end_mask_0 = const()[name = tensor<string, []>("op_581_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
377
+ tensor<fp16, [1, 1, 1, 1500]> var_581_cast_fp16 = slice_by_index(begin = var_581_begin_0, end = var_581_end_0, end_mask = var_581_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_581_cast_fp16")];
378
+ tensor<int32, [4]> var_584_begin_0 = const()[name = tensor<string, []>("op_584_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
379
+ tensor<int32, [4]> var_584_end_0 = const()[name = tensor<string, []>("op_584_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
380
+ tensor<bool, [4]> var_584_end_mask_0 = const()[name = tensor<string, []>("op_584_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
381
+ tensor<bool, [4]> var_584_squeeze_mask_0 = const()[name = tensor<string, []>("op_584_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
382
+ tensor<fp16, [1, 1, 1500]> var_584_cast_fp16 = slice_by_index(begin = var_584_begin_0, end = var_584_end_0, end_mask = var_584_end_mask_0, squeeze_mask = var_584_squeeze_mask_0, x = var_581_cast_fp16)[name = tensor<string, []>("op_584_cast_fp16")];
383
+ tensor<int32, [4]> var_599_begin_0 = const()[name = tensor<string, []>("op_599_begin_0"), val = tensor<int32, [4]>([0, 4, 0, 0])];
384
+ tensor<int32, [4]> var_599_end_0 = const()[name = tensor<string, []>("op_599_end_0"), val = tensor<int32, [4]>([1, 5, 1, 1500])];
385
+ tensor<bool, [4]> var_599_end_mask_0 = const()[name = tensor<string, []>("op_599_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
386
+ tensor<fp16, [1, 1, 1, 1500]> var_599_cast_fp16 = slice_by_index(begin = var_599_begin_0, end = var_599_end_0, end_mask = var_599_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_599_cast_fp16")];
387
+ tensor<int32, [4]> var_602_begin_0 = const()[name = tensor<string, []>("op_602_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
388
+ tensor<int32, [4]> var_602_end_0 = const()[name = tensor<string, []>("op_602_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
389
+ tensor<bool, [4]> var_602_end_mask_0 = const()[name = tensor<string, []>("op_602_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
390
+ tensor<bool, [4]> var_602_squeeze_mask_0 = const()[name = tensor<string, []>("op_602_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
391
+ tensor<fp16, [1, 1, 1500]> var_602_cast_fp16 = slice_by_index(begin = var_602_begin_0, end = var_602_end_0, end_mask = var_602_end_mask_0, squeeze_mask = var_602_squeeze_mask_0, x = var_599_cast_fp16)[name = tensor<string, []>("op_602_cast_fp16")];
392
+ tensor<int32, [4]> var_617_begin_0 = const()[name = tensor<string, []>("op_617_begin_0"), val = tensor<int32, [4]>([0, 5, 0, 0])];
393
+ tensor<int32, [4]> var_617_end_0 = const()[name = tensor<string, []>("op_617_end_0"), val = tensor<int32, [4]>([1, 6, 1, 1500])];
394
+ tensor<bool, [4]> var_617_end_mask_0 = const()[name = tensor<string, []>("op_617_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
395
+ tensor<fp16, [1, 1, 1, 1500]> var_617_cast_fp16 = slice_by_index(begin = var_617_begin_0, end = var_617_end_0, end_mask = var_617_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")];
396
+ tensor<int32, [4]> var_620_begin_0 = const()[name = tensor<string, []>("op_620_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
397
+ tensor<int32, [4]> var_620_end_0 = const()[name = tensor<string, []>("op_620_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
398
+ tensor<bool, [4]> var_620_end_mask_0 = const()[name = tensor<string, []>("op_620_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
399
+ tensor<bool, [4]> var_620_squeeze_mask_0 = const()[name = tensor<string, []>("op_620_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
400
+ tensor<fp16, [1, 1, 1500]> var_620_cast_fp16 = slice_by_index(begin = var_620_begin_0, end = var_620_end_0, end_mask = var_620_end_mask_0, squeeze_mask = var_620_squeeze_mask_0, x = var_617_cast_fp16)[name = tensor<string, []>("op_620_cast_fp16")];
401
+ tensor<int32, [4]> var_635_begin_0 = const()[name = tensor<string, []>("op_635_begin_0"), val = tensor<int32, [4]>([0, 6, 0, 0])];
402
+ tensor<int32, [4]> var_635_end_0 = const()[name = tensor<string, []>("op_635_end_0"), val = tensor<int32, [4]>([1, 7, 1, 1500])];
403
+ tensor<bool, [4]> var_635_end_mask_0 = const()[name = tensor<string, []>("op_635_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
404
+ tensor<fp16, [1, 1, 1, 1500]> var_635_cast_fp16 = slice_by_index(begin = var_635_begin_0, end = var_635_end_0, end_mask = var_635_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_635_cast_fp16")];
405
+ tensor<int32, [4]> var_638_begin_0 = const()[name = tensor<string, []>("op_638_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
406
+ tensor<int32, [4]> var_638_end_0 = const()[name = tensor<string, []>("op_638_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
407
+ tensor<bool, [4]> var_638_end_mask_0 = const()[name = tensor<string, []>("op_638_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
408
+ tensor<bool, [4]> var_638_squeeze_mask_0 = const()[name = tensor<string, []>("op_638_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
409
+ tensor<fp16, [1, 1, 1500]> var_638_cast_fp16 = slice_by_index(begin = var_638_begin_0, end = var_638_end_0, end_mask = var_638_end_mask_0, squeeze_mask = var_638_squeeze_mask_0, x = var_635_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")];
410
+ tensor<int32, [4]> var_653_begin_0 = const()[name = tensor<string, []>("op_653_begin_0"), val = tensor<int32, [4]>([0, 7, 0, 0])];
411
+ tensor<int32, [4]> var_653_end_0 = const()[name = tensor<string, []>("op_653_end_0"), val = tensor<int32, [4]>([1, 8, 1, 1500])];
412
+ tensor<bool, [4]> var_653_end_mask_0 = const()[name = tensor<string, []>("op_653_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
413
+ tensor<fp16, [1, 1, 1, 1500]> var_653_cast_fp16 = slice_by_index(begin = var_653_begin_0, end = var_653_end_0, end_mask = var_653_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_653_cast_fp16")];
414
+ tensor<int32, [4]> var_656_begin_0 = const()[name = tensor<string, []>("op_656_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
415
+ tensor<int32, [4]> var_656_end_0 = const()[name = tensor<string, []>("op_656_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
416
+ tensor<bool, [4]> var_656_end_mask_0 = const()[name = tensor<string, []>("op_656_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
417
+ tensor<bool, [4]> var_656_squeeze_mask_0 = const()[name = tensor<string, []>("op_656_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
418
+ tensor<fp16, [1, 1, 1500]> var_656_cast_fp16 = slice_by_index(begin = var_656_begin_0, end = var_656_end_0, end_mask = var_656_end_mask_0, squeeze_mask = var_656_squeeze_mask_0, x = var_653_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")];
419
+ tensor<int32, [4]> var_671_begin_0 = const()[name = tensor<string, []>("op_671_begin_0"), val = tensor<int32, [4]>([0, 8, 0, 0])];
420
+ tensor<int32, [4]> var_671_end_0 = const()[name = tensor<string, []>("op_671_end_0"), val = tensor<int32, [4]>([1, 9, 1, 1500])];
421
+ tensor<bool, [4]> var_671_end_mask_0 = const()[name = tensor<string, []>("op_671_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
422
+ tensor<fp16, [1, 1, 1, 1500]> var_671_cast_fp16 = slice_by_index(begin = var_671_begin_0, end = var_671_end_0, end_mask = var_671_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_671_cast_fp16")];
423
+ tensor<int32, [4]> var_674_begin_0 = const()[name = tensor<string, []>("op_674_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
424
+ tensor<int32, [4]> var_674_end_0 = const()[name = tensor<string, []>("op_674_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
425
+ tensor<bool, [4]> var_674_end_mask_0 = const()[name = tensor<string, []>("op_674_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
426
+ tensor<bool, [4]> var_674_squeeze_mask_0 = const()[name = tensor<string, []>("op_674_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
427
+ tensor<fp16, [1, 1, 1500]> var_674_cast_fp16 = slice_by_index(begin = var_674_begin_0, end = var_674_end_0, end_mask = var_674_end_mask_0, squeeze_mask = var_674_squeeze_mask_0, x = var_671_cast_fp16)[name = tensor<string, []>("op_674_cast_fp16")];
428
+ tensor<int32, [4]> var_689_begin_0 = const()[name = tensor<string, []>("op_689_begin_0"), val = tensor<int32, [4]>([0, 9, 0, 0])];
429
+ tensor<int32, [4]> var_689_end_0 = const()[name = tensor<string, []>("op_689_end_0"), val = tensor<int32, [4]>([1, 10, 1, 1500])];
430
+ tensor<bool, [4]> var_689_end_mask_0 = const()[name = tensor<string, []>("op_689_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
431
+ tensor<fp16, [1, 1, 1, 1500]> var_689_cast_fp16 = slice_by_index(begin = var_689_begin_0, end = var_689_end_0, end_mask = var_689_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_689_cast_fp16")];
432
+ tensor<int32, [4]> var_692_begin_0 = const()[name = tensor<string, []>("op_692_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
433
+ tensor<int32, [4]> var_692_end_0 = const()[name = tensor<string, []>("op_692_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
434
+ tensor<bool, [4]> var_692_end_mask_0 = const()[name = tensor<string, []>("op_692_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
435
+ tensor<bool, [4]> var_692_squeeze_mask_0 = const()[name = tensor<string, []>("op_692_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
436
+ tensor<fp16, [1, 1, 1500]> var_692_cast_fp16 = slice_by_index(begin = var_692_begin_0, end = var_692_end_0, end_mask = var_692_end_mask_0, squeeze_mask = var_692_squeeze_mask_0, x = var_689_cast_fp16)[name = tensor<string, []>("op_692_cast_fp16")];
437
+ tensor<int32, [4]> var_707_begin_0 = const()[name = tensor<string, []>("op_707_begin_0"), val = tensor<int32, [4]>([0, 10, 0, 0])];
438
+ tensor<int32, [4]> var_707_end_0 = const()[name = tensor<string, []>("op_707_end_0"), val = tensor<int32, [4]>([1, 11, 1, 1500])];
439
+ tensor<bool, [4]> var_707_end_mask_0 = const()[name = tensor<string, []>("op_707_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
440
+ tensor<fp16, [1, 1, 1, 1500]> var_707_cast_fp16 = slice_by_index(begin = var_707_begin_0, end = var_707_end_0, end_mask = var_707_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_707_cast_fp16")];
441
+ tensor<int32, [4]> var_710_begin_0 = const()[name = tensor<string, []>("op_710_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
442
+ tensor<int32, [4]> var_710_end_0 = const()[name = tensor<string, []>("op_710_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
443
+ tensor<bool, [4]> var_710_end_mask_0 = const()[name = tensor<string, []>("op_710_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
444
+ tensor<bool, [4]> var_710_squeeze_mask_0 = const()[name = tensor<string, []>("op_710_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
445
+ tensor<fp16, [1, 1, 1500]> var_710_cast_fp16 = slice_by_index(begin = var_710_begin_0, end = var_710_end_0, end_mask = var_710_end_mask_0, squeeze_mask = var_710_squeeze_mask_0, x = var_707_cast_fp16)[name = tensor<string, []>("op_710_cast_fp16")];
446
+ tensor<int32, [4]> var_725_begin_0 = const()[name = tensor<string, []>("op_725_begin_0"), val = tensor<int32, [4]>([0, 11, 0, 0])];
447
+ tensor<int32, [4]> var_725_end_0 = const()[name = tensor<string, []>("op_725_end_0"), val = tensor<int32, [4]>([1, 12, 1, 1500])];
448
+ tensor<bool, [4]> var_725_end_mask_0 = const()[name = tensor<string, []>("op_725_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
449
+ tensor<fp16, [1, 1, 1, 1500]> var_725_cast_fp16 = slice_by_index(begin = var_725_begin_0, end = var_725_end_0, end_mask = var_725_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_725_cast_fp16")];
450
+ tensor<int32, [4]> var_728_begin_0 = const()[name = tensor<string, []>("op_728_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
451
+ tensor<int32, [4]> var_728_end_0 = const()[name = tensor<string, []>("op_728_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
452
+ tensor<bool, [4]> var_728_end_mask_0 = const()[name = tensor<string, []>("op_728_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
453
+ tensor<bool, [4]> var_728_squeeze_mask_0 = const()[name = tensor<string, []>("op_728_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
454
+ tensor<fp16, [1, 1, 1500]> var_728_cast_fp16 = slice_by_index(begin = var_728_begin_0, end = var_728_end_0, end_mask = var_728_end_mask_0, squeeze_mask = var_728_squeeze_mask_0, x = var_725_cast_fp16)[name = tensor<string, []>("op_728_cast_fp16")];
455
+ tensor<int32, [4]> var_743_begin_0 = const()[name = tensor<string, []>("op_743_begin_0"), val = tensor<int32, [4]>([0, 12, 0, 0])];
456
+ tensor<int32, [4]> var_743_end_0 = const()[name = tensor<string, []>("op_743_end_0"), val = tensor<int32, [4]>([1, 13, 1, 1500])];
457
+ tensor<bool, [4]> var_743_end_mask_0 = const()[name = tensor<string, []>("op_743_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
458
+ tensor<fp16, [1, 1, 1, 1500]> var_743_cast_fp16 = slice_by_index(begin = var_743_begin_0, end = var_743_end_0, end_mask = var_743_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_743_cast_fp16")];
459
+ tensor<int32, [4]> var_746_begin_0 = const()[name = tensor<string, []>("op_746_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
460
+ tensor<int32, [4]> var_746_end_0 = const()[name = tensor<string, []>("op_746_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
461
+ tensor<bool, [4]> var_746_end_mask_0 = const()[name = tensor<string, []>("op_746_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
462
+ tensor<bool, [4]> var_746_squeeze_mask_0 = const()[name = tensor<string, []>("op_746_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
463
+ tensor<fp16, [1, 1, 1500]> var_746_cast_fp16 = slice_by_index(begin = var_746_begin_0, end = var_746_end_0, end_mask = var_746_end_mask_0, squeeze_mask = var_746_squeeze_mask_0, x = var_743_cast_fp16)[name = tensor<string, []>("op_746_cast_fp16")];
464
+ tensor<int32, [4]> var_761_begin_0 = const()[name = tensor<string, []>("op_761_begin_0"), val = tensor<int32, [4]>([0, 13, 0, 0])];
465
+ tensor<int32, [4]> var_761_end_0 = const()[name = tensor<string, []>("op_761_end_0"), val = tensor<int32, [4]>([1, 14, 1, 1500])];
466
+ tensor<bool, [4]> var_761_end_mask_0 = const()[name = tensor<string, []>("op_761_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
467
+ tensor<fp16, [1, 1, 1, 1500]> var_761_cast_fp16 = slice_by_index(begin = var_761_begin_0, end = var_761_end_0, end_mask = var_761_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_761_cast_fp16")];
468
+ tensor<int32, [4]> var_764_begin_0 = const()[name = tensor<string, []>("op_764_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
469
+ tensor<int32, [4]> var_764_end_0 = const()[name = tensor<string, []>("op_764_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
470
+ tensor<bool, [4]> var_764_end_mask_0 = const()[name = tensor<string, []>("op_764_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
471
+ tensor<bool, [4]> var_764_squeeze_mask_0 = const()[name = tensor<string, []>("op_764_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
472
+ tensor<fp16, [1, 1, 1500]> var_764_cast_fp16 = slice_by_index(begin = var_764_begin_0, end = var_764_end_0, end_mask = var_764_end_mask_0, squeeze_mask = var_764_squeeze_mask_0, x = var_761_cast_fp16)[name = tensor<string, []>("op_764_cast_fp16")];
473
+ tensor<int32, [4]> var_779_begin_0 = const()[name = tensor<string, []>("op_779_begin_0"), val = tensor<int32, [4]>([0, 14, 0, 0])];
474
+ tensor<int32, [4]> var_779_end_0 = const()[name = tensor<string, []>("op_779_end_0"), val = tensor<int32, [4]>([1, 15, 1, 1500])];
475
+ tensor<bool, [4]> var_779_end_mask_0 = const()[name = tensor<string, []>("op_779_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
476
+ tensor<fp16, [1, 1, 1, 1500]> var_779_cast_fp16 = slice_by_index(begin = var_779_begin_0, end = var_779_end_0, end_mask = var_779_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_779_cast_fp16")];
477
+ tensor<int32, [4]> var_782_begin_0 = const()[name = tensor<string, []>("op_782_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
478
+ tensor<int32, [4]> var_782_end_0 = const()[name = tensor<string, []>("op_782_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
479
+ tensor<bool, [4]> var_782_end_mask_0 = const()[name = tensor<string, []>("op_782_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
480
+ tensor<bool, [4]> var_782_squeeze_mask_0 = const()[name = tensor<string, []>("op_782_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
481
+ tensor<fp16, [1, 1, 1500]> var_782_cast_fp16 = slice_by_index(begin = var_782_begin_0, end = var_782_end_0, end_mask = var_782_end_mask_0, squeeze_mask = var_782_squeeze_mask_0, x = var_779_cast_fp16)[name = tensor<string, []>("op_782_cast_fp16")];
482
+ tensor<int32, [4]> var_797_begin_0 = const()[name = tensor<string, []>("op_797_begin_0"), val = tensor<int32, [4]>([0, 15, 0, 0])];
483
+ tensor<int32, [4]> var_797_end_0 = const()[name = tensor<string, []>("op_797_end_0"), val = tensor<int32, [4]>([1, 16, 1, 1500])];
484
+ tensor<bool, [4]> var_797_end_mask_0 = const()[name = tensor<string, []>("op_797_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
485
+ tensor<fp16, [1, 1, 1, 1500]> var_797_cast_fp16 = slice_by_index(begin = var_797_begin_0, end = var_797_end_0, end_mask = var_797_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_797_cast_fp16")];
486
+ tensor<int32, [4]> var_800_begin_0 = const()[name = tensor<string, []>("op_800_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
487
+ tensor<int32, [4]> var_800_end_0 = const()[name = tensor<string, []>("op_800_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
488
+ tensor<bool, [4]> var_800_end_mask_0 = const()[name = tensor<string, []>("op_800_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
489
+ tensor<bool, [4]> var_800_squeeze_mask_0 = const()[name = tensor<string, []>("op_800_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
490
+ tensor<fp16, [1, 1, 1500]> var_800_cast_fp16 = slice_by_index(begin = var_800_begin_0, end = var_800_end_0, end_mask = var_800_end_mask_0, squeeze_mask = var_800_squeeze_mask_0, x = var_797_cast_fp16)[name = tensor<string, []>("op_800_cast_fp16")];
491
+ tensor<int32, [4]> var_815_begin_0 = const()[name = tensor<string, []>("op_815_begin_0"), val = tensor<int32, [4]>([0, 16, 0, 0])];
492
+ tensor<int32, [4]> var_815_end_0 = const()[name = tensor<string, []>("op_815_end_0"), val = tensor<int32, [4]>([1, 17, 1, 1500])];
493
+ tensor<bool, [4]> var_815_end_mask_0 = const()[name = tensor<string, []>("op_815_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
494
+ tensor<fp16, [1, 1, 1, 1500]> var_815_cast_fp16 = slice_by_index(begin = var_815_begin_0, end = var_815_end_0, end_mask = var_815_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_815_cast_fp16")];
495
+ tensor<int32, [4]> var_818_begin_0 = const()[name = tensor<string, []>("op_818_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
496
+ tensor<int32, [4]> var_818_end_0 = const()[name = tensor<string, []>("op_818_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
497
+ tensor<bool, [4]> var_818_end_mask_0 = const()[name = tensor<string, []>("op_818_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
498
+ tensor<bool, [4]> var_818_squeeze_mask_0 = const()[name = tensor<string, []>("op_818_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
499
+ tensor<fp16, [1, 1, 1500]> var_818_cast_fp16 = slice_by_index(begin = var_818_begin_0, end = var_818_end_0, end_mask = var_818_end_mask_0, squeeze_mask = var_818_squeeze_mask_0, x = var_815_cast_fp16)[name = tensor<string, []>("op_818_cast_fp16")];
500
+ tensor<int32, [4]> var_833_begin_0 = const()[name = tensor<string, []>("op_833_begin_0"), val = tensor<int32, [4]>([0, 17, 0, 0])];
501
+ tensor<int32, [4]> var_833_end_0 = const()[name = tensor<string, []>("op_833_end_0"), val = tensor<int32, [4]>([1, 18, 1, 1500])];
502
+ tensor<bool, [4]> var_833_end_mask_0 = const()[name = tensor<string, []>("op_833_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
503
+ tensor<fp16, [1, 1, 1, 1500]> var_833_cast_fp16 = slice_by_index(begin = var_833_begin_0, end = var_833_end_0, end_mask = var_833_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_833_cast_fp16")];
504
+ tensor<int32, [4]> var_836_begin_0 = const()[name = tensor<string, []>("op_836_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
505
+ tensor<int32, [4]> var_836_end_0 = const()[name = tensor<string, []>("op_836_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
506
+ tensor<bool, [4]> var_836_end_mask_0 = const()[name = tensor<string, []>("op_836_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
507
+ tensor<bool, [4]> var_836_squeeze_mask_0 = const()[name = tensor<string, []>("op_836_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
508
+ tensor<fp16, [1, 1, 1500]> var_836_cast_fp16 = slice_by_index(begin = var_836_begin_0, end = var_836_end_0, end_mask = var_836_end_mask_0, squeeze_mask = var_836_squeeze_mask_0, x = var_833_cast_fp16)[name = tensor<string, []>("op_836_cast_fp16")];
509
+ tensor<int32, [4]> var_851_begin_0 = const()[name = tensor<string, []>("op_851_begin_0"), val = tensor<int32, [4]>([0, 18, 0, 0])];
510
+ tensor<int32, [4]> var_851_end_0 = const()[name = tensor<string, []>("op_851_end_0"), val = tensor<int32, [4]>([1, 19, 1, 1500])];
511
+ tensor<bool, [4]> var_851_end_mask_0 = const()[name = tensor<string, []>("op_851_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
512
+ tensor<fp16, [1, 1, 1, 1500]> var_851_cast_fp16 = slice_by_index(begin = var_851_begin_0, end = var_851_end_0, end_mask = var_851_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_851_cast_fp16")];
513
+ tensor<int32, [4]> var_854_begin_0 = const()[name = tensor<string, []>("op_854_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
514
+ tensor<int32, [4]> var_854_end_0 = const()[name = tensor<string, []>("op_854_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
515
+ tensor<bool, [4]> var_854_end_mask_0 = const()[name = tensor<string, []>("op_854_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
516
+ tensor<bool, [4]> var_854_squeeze_mask_0 = const()[name = tensor<string, []>("op_854_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
+ tensor<fp16, [1, 1, 1500]> var_854_cast_fp16 = slice_by_index(begin = var_854_begin_0, end = var_854_end_0, end_mask = var_854_end_mask_0, squeeze_mask = var_854_squeeze_mask_0, x = var_851_cast_fp16)[name = tensor<string, []>("op_854_cast_fp16")];
+ tensor<int32, [4]> var_869_begin_0 = const()[name = tensor<string, []>("op_869_begin_0"), val = tensor<int32, [4]>([0, 19, 0, 0])];
+ tensor<int32, [4]> var_869_end_0 = const()[name = tensor<string, []>("op_869_end_0"), val = tensor<int32, [4]>([1, 20, 1, 1500])];
+ tensor<bool, [4]> var_869_end_mask_0 = const()[name = tensor<string, []>("op_869_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
+ tensor<fp16, [1, 1, 1, 1500]> var_869_cast_fp16 = slice_by_index(begin = var_869_begin_0, end = var_869_end_0, end_mask = var_869_end_mask_0, x = obj_27_cast_fp16)[name = tensor<string, []>("op_869_cast_fp16")];
+ tensor<int32, [4]> var_872_begin_0 = const()[name = tensor<string, []>("op_872_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+ tensor<int32, [4]> var_872_end_0 = const()[name = tensor<string, []>("op_872_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1500])];
+ tensor<bool, [4]> var_872_end_mask_0 = const()[name = tensor<string, []>("op_872_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
+ tensor<bool, [4]> var_872_squeeze_mask_0 = const()[name = tensor<string, []>("op_872_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
+ tensor<fp16, [1, 1, 1500]> var_872_cast_fp16 = slice_by_index(begin = var_872_begin_0, end = var_872_end_0, end_mask = var_872_end_mask_0, squeeze_mask = var_872_squeeze_mask_0, x = var_869_cast_fp16)[name = tensor<string, []>("op_872_cast_fp16")];
+ tensor<int32, []> var_879 = const()[name = tensor<string, []>("op_879"), val = tensor<int32, []>(1)];
+ tensor<bool, []> var_880_interleave_0 = const()[name = tensor<string, []>("op_880_interleave_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [1, 20, 1500]> var_880_cast_fp16 = concat(axis = var_879, interleave = var_880_interleave_0, values = (var_530_cast_fp16, var_548_cast_fp16, var_566_cast_fp16, var_584_cast_fp16, var_602_cast_fp16, var_620_cast_fp16, var_638_cast_fp16, var_656_cast_fp16, var_674_cast_fp16, var_692_cast_fp16, var_710_cast_fp16, var_728_cast_fp16, var_746_cast_fp16, var_764_cast_fp16, var_782_cast_fp16, var_800_cast_fp16, var_818_cast_fp16, var_836_cast_fp16, var_854_cast_fp16, var_872_cast_fp16))[name = tensor<string, []>("op_880_cast_fp16")];
+ tensor<bool, []> var_883 = const()[name = tensor<string, []>("op_883"), val = tensor<bool, []>(false)];
+ tensor<int32, [1]> obj_axes_0 = const()[name = tensor<string, []>("obj_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1500]> alignment_heads_weights = reduce_mean(axes = obj_axes_0, keep_dims = var_883, x = var_880_cast_fp16)[name = tensor<string, []>("obj_cast_fp16")];
+ } -> (logits, key_cache_updates, value_cache_updates, alignment_heads_weights);
+ }
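The tail of this TextDecoder graph is the alignment-heads reduction: each of the 20 cross-attention heads in obj_27_cast_fp16 (shape [1, 20, 1, 1500], matching the 20 decoder attention heads declared in config.json below) is sliced out individually (var_530 through var_872), the singleton query axis is squeezed away, the heads are concatenated back into a [1, 20, 1500] tensor (var_880), and a reduce_mean over the head axis produces the [1, 1500] alignment_heads_weights output. A minimal NumPy sketch of the same computation, using a random placeholder in place of obj_27_cast_fp16:

```python
import numpy as np

# Placeholder for obj_27_cast_fp16: last-layer cross-attention weights with
# shape [1, 20, 1, 1500] = (batch, heads, query token, audio frames).
cross_attn = np.random.rand(1, 20, 1, 1500).astype(np.float16)

# Mirror of the MIL ops: slice each head out (var_530 ... var_872), drop the
# query axis, then concat along axis 1 back into [1, 20, 1500] (var_880).
heads = [cross_attn[:, h:h + 1, 0, :] for h in range(20)]   # each [1, 1, 1500]
stacked = np.concatenate(heads, axis=1)                     # [1, 20, 1500]

# reduce_mean over the head axis with keep_dims=False yields the model's
# [1, 1500] alignment_heads_weights output.
alignment_heads_weights = stacked.mean(axis=1)
print(alignment_heads_weights.shape)                        # (1, 1500)
```

These per-frame weights are the attention signal typically used for word-level timestamp alignment; the set of heads being averaged matches the alignment_heads list in generation_config.json further down.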
dearyoungjo_Whisper-Medicalv1CNB1/TextDecoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:855c2c8dbed9d1ffe84bf538708ca5c1363bfa712448a7c49a708dc6a3d02391
+ size 238986036
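Like the other binaries in this commit, weight.bin is checked in as a Git LFS pointer: the three lines above record the pointer spec version, the SHA-256 of the actual payload, and its size in bytes (about 239 MB). A small verification sketch, assuming the repository has been cloned with LFS objects fetched and using a hypothetical local path:

```python
import hashlib
import os

# Hypothetical path to the fetched weights; adjust to your local checkout.
path = "dearyoungjo_Whisper-Medicalv1CNB1/TextDecoder.mlmodelc/weights/weight.bin"

# Hash in 1 MiB chunks so the ~239 MB file never sits in memory all at once.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# Both expected values come straight from the LFS pointer in the diff above.
assert os.path.getsize(path) == 238986036
assert digest.hexdigest() == (
    "855c2c8dbed9d1ffe84bf538708ca5c1363bfa712448a7c49a708dc6a3d02391"
)
print("weight.bin matches its LFS pointer")
```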
dearyoungjo_Whisper-Medicalv1CNB1/config.json ADDED
@@ -0,0 +1 @@
+ {"_name_or_path": "./distil-large-v3", "activation_dropout": 0.0, "activation_function": "gelu", "apply_spec_augment": false, "architectures": ["WhisperForConditionalGeneration"], "attention_dropout": 0.0, "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "classifier_proj_size": 256, "d_model": 1280, "decoder_attention_heads": 20, "decoder_ffn_dim": 5120, "decoder_layerdrop": 0.0, "decoder_layers": 2, "decoder_start_token_id": 50258, "dropout": 0.0, "encoder_attention_heads": 20, "encoder_ffn_dim": 5120, "encoder_layerdrop": 0.0, "encoder_layers": 32, "eos_token_id": 50257, "init_std": 0.02, "is_encoder_decoder": true, "mask_feature_length": 10, "mask_feature_min_masks": 0, "mask_feature_prob": 0.0, "mask_time_length": 10, "mask_time_min_masks": 2, "mask_time_prob": 0.05, "max_length": 448, "max_source_positions": 1500, "max_target_positions": 448, "median_filter_width": 7, "model_type": "whisper", "num_hidden_layers": 32, "num_mel_bins": 128, "pad_token_id": 50256, "scale_embedding": false, "torch_dtype": "float16", "transformers_version": "4.38.0.dev0", "use_cache": true, "use_weighted_layer_sum": false, "vocab_size": 51866}
dearyoungjo_Whisper-Medicalv1CNB1/generation_config.json ADDED
@@ -0,0 +1 @@
+ {"alignment_heads": [[1, 0], [1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8], [1, 9], [1, 10], [1, 11], [1, 12], [1, 13], [1, 14], [1, 15], [1, 16], [1, 17], [1, 18], [1, 19]], "begin_suppress_tokens": [220, 50257], "bos_token_id": 50257, "decoder_start_token_id": 50258, "eos_token_id": 50257, "forced_decoder_ids": [[1, null], [2, 50360]], "is_multilingual": true, "lang_to_id": {"<|af|>": 50327, "<|am|>": 50334, "<|ar|>": 50272, "<|as|>": 50350, "<|az|>": 50304, "<|ba|>": 50355, "<|be|>": 50330, "<|bg|>": 50292, "<|bn|>": 50302, "<|bo|>": 50347, "<|br|>": 50309, "<|bs|>": 50315, "<|ca|>": 50270, "<|cs|>": 50283, "<|cy|>": 50297, "<|da|>": 50285, "<|de|>": 50261, "<|el|>": 50281, "<|en|>": 50259, "<|es|>": 50262, "<|et|>": 50307, "<|eu|>": 50310, "<|fa|>": 50300, "<|fi|>": 50277, "<|fo|>": 50338, "<|fr|>": 50265, "<|gl|>": 50319, "<|gu|>": 50333, "<|haw|>": 50352, "<|ha|>": 50354, "<|he|>": 50279, "<|hi|>": 50276, "<|hr|>": 50291, "<|ht|>": 50339, "<|hu|>": 50286, "<|hy|>": 50312, "<|id|>": 50275, "<|is|>": 50311, "<|it|>": 50274, "<|ja|>": 50266, "<|jw|>": 50356, "<|ka|>": 50329, "<|kk|>": 50316, "<|km|>": 50323, "<|kn|>": 50306, "<|ko|>": 50264, "<|la|>": 50294, "<|lb|>": 50345, "<|ln|>": 50353, "<|lo|>": 50336, "<|lt|>": 50293, "<|lv|>": 50301, "<|mg|>": 50349, "<|mi|>": 50295, "<|mk|>": 50308, "<|ml|>": 50296, "<|mn|>": 50314, "<|mr|>": 50320, "<|ms|>": 50282, "<|mt|>": 50343, "<|my|>": 50346, "<|ne|>": 50313, "<|nl|>": 50271, "<|nn|>": 50342, "<|no|>": 50288, "<|oc|>": 50328, "<|pa|>": 50321, "<|pl|>": 50269, "<|ps|>": 50340, "<|pt|>": 50267, "<|ro|>": 50284, "<|ru|>": 50263, "<|sa|>": 50344, "<|sd|>": 50332, "<|si|>": 50322, "<|sk|>": 50298, "<|sl|>": 50305, "<|sn|>": 50324, "<|so|>": 50326, "<|sq|>": 50317, "<|sr|>": 50303, "<|su|>": 50357, "<|sv|>": 50273, "<|sw|>": 50318, "<|ta|>": 50287, "<|te|>": 50299, "<|tg|>": 50331, "<|th|>": 50289, "<|tk|>": 50341, "<|tl|>": 50348, "<|tr|>": 50268, "<|tt|>": 50351, "<|uk|>": 50280, "<|ur|>": 50290, "<|uz|>": 50337, "<|vi|>": 50278, "<|yi|>": 50335, "<|yo|>": 50325, "<|yue|>": 50358, "<|zh|>": 50260}, "language": "<|en|>", "max_initial_timestamp_index": 50, "max_length": 448, "no_timestamps_token_id": 50364, "pad_token_id": 50257, "prev_sot_token_id": 50362, "return_timestamps": false, "suppress_tokens": [1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50359, 50360, 50361, 50362, 50363], "task": "transcribe", "task_to_id": {"transcribe": 50360, "translate": 50359}, "transformers_version": "4.40.0.dev0"}