Add model
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
- Llama-2-7b-hf_chunk1.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk1.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk1.mlmodelc/metadata.json +104 -0
- Llama-2-7b-hf_chunk1.mlmodelc/model.mil +48 -0
- Llama-2-7b-hf_chunk1.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk10.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk10.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk10.mlmodelc/metadata.json +218 -0
- Llama-2-7b-hf_chunk10.mlmodelc/model.mil +429 -0
- Llama-2-7b-hf_chunk10.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk11.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk11.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk11.mlmodelc/metadata.json +218 -0
- Llama-2-7b-hf_chunk11.mlmodelc/model.mil +429 -0
- Llama-2-7b-hf_chunk11.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk12.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk12.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk12.mlmodelc/metadata.json +178 -0
- Llama-2-7b-hf_chunk12.mlmodelc/model.mil +288 -0
- Llama-2-7b-hf_chunk12.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk13.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk13.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk13.mlmodelc/metadata.json +65 -0
- Llama-2-7b-hf_chunk13.mlmodelc/model.mil +38 -0
- Llama-2-7b-hf_chunk13.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk2.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk2.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk2.mlmodelc/metadata.json +218 -0
- Llama-2-7b-hf_chunk2.mlmodelc/model.mil +429 -0
- Llama-2-7b-hf_chunk2.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk3.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk3.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk3.mlmodelc/metadata.json +218 -0
- Llama-2-7b-hf_chunk3.mlmodelc/model.mil +429 -0
- Llama-2-7b-hf_chunk3.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk4.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk4.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk4.mlmodelc/metadata.json +218 -0
- Llama-2-7b-hf_chunk4.mlmodelc/model.mil +429 -0
- Llama-2-7b-hf_chunk4.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk5.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk5.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk5.mlmodelc/metadata.json +218 -0
- Llama-2-7b-hf_chunk5.mlmodelc/model.mil +429 -0
- Llama-2-7b-hf_chunk5.mlmodelc/weights/weight.bin +3 -0
- Llama-2-7b-hf_chunk6.mlmodelc/analytics/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk6.mlmodelc/coremldata.bin +3 -0
- Llama-2-7b-hf_chunk6.mlmodelc/metadata.json +218 -0
- Llama-2-7b-hf_chunk6.mlmodelc/model.mil +429 -0
- Llama-2-7b-hf_chunk6.mlmodelc/weights/weight.bin +3 -0
Llama-2-7b-hf_chunk1.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2184893e48a9da76b01012a32cca3e2ebfd4080553daa78318fe2391679dd7fe
size 243
Llama-2-7b-hf_chunk1.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8dbc016e9274c2a01d9eddb55dfd163a8ae74e7e97f0932268602c1a8b14903c
size 407
Llama-2-7b-hf_chunk1.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,104 @@
[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Float16",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 4096, 1, 64]",
        "name" : "x",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 128 × 64)",
        "shortDescription" : "",
        "shape" : "[128, 64]",
        "name" : "cos",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 128 × 64)",
        "shortDescription" : "",
        "shape" : "[128, 64]",
        "name" : "sin",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
        "shortDescription" : "",
        "shape" : "[1, 1, 64, 512]",
        "name" : "mask",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Select" : 2,
      "Tile" : 2,
      "Ios16.sub" : 3,
      "Transpose" : 1,
      "Ios16.gather" : 3,
      "ExpandDims" : 4,
      "Ios16.maximum" : 1,
      "Ios16.less" : 2
    },
    "computePrecision" : "Mixed (Float16, Int32)",
    "isUpdatable" : "0",
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.source" : "torch==2.1.0",
      "com.github.apple.coremltools.version" : "7.2"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Int32",
        "formattedType" : "MultiArray (Int32 1 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 64]",
        "name" : "input_ids",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Int32",
        "formattedType" : "MultiArray (Int32 1)",
        "shortDescription" : "",
        "shape" : "[1]",
        "name" : "full_sequence_length",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk1",
    "method" : "predict"
  }
]
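The schema above is enough for a quick sanity check of a chunk's interface without loading the compiled model: the block below is a minimal sketch that reads a chunk's metadata.json and prints each input and output, assuming the directory layout added in this commit (the example path is illustrative, not part of the commit).

```python
import json
from pathlib import Path

def describe_chunk(mlmodelc_dir):
    """Print the input/output schema recorded in a chunk's metadata.json.

    Assumes the compiled-chunk layout shown in this commit:
    <name>.mlmodelc/metadata.json holding a one-element JSON list.
    """
    meta = json.loads((Path(mlmodelc_dir) / "metadata.json").read_text())[0]
    for section in ("inputSchema", "outputSchema"):
        print(section)
        for feature in meta[section]:
            optional = " (optional)" if feature["isOptional"] == "1" else ""
            print(f"  {feature['name']}: {feature['dataType']} {feature['shape']}{optional}")

# Example call; the path is an assumption based on the files added here.
describe_chunk("Llama-2-7b-hf_chunk1.mlmodelc")
```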
Llama-2-7b-hf_chunk1.mlmodelc/model.mil
ADDED
@@ -0,0 +1,48 @@
program(1.0)
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
{
func main<ios16>(tensor<int32, [1]> full_sequence_length, tensor<int32, [1, 64]> input_ids) {
tensor<int32, [1]> T = const()[name = tensor<string, []>("T"), val = tensor<int32, [1]>([64])];
tensor<int32, []> x_axis_0 = const()[name = tensor<string, []>("x_axis_0"), val = tensor<int32, []>(0)];
tensor<int32, []> x_batch_dims_0 = const()[name = tensor<string, []>("x_batch_dims_0"), val = tensor<int32, []>(0)];
tensor<fp16, [32000, 4096]> wte_weight_to_fp16 = const()[name = tensor<string, []>("wte_weight_to_fp16"), val = tensor<fp16, [32000, 4096]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
tensor<fp16, [1, 64, 4096]> x_cast_fp16 = gather(axis = x_axis_0, batch_dims = x_batch_dims_0, indices = input_ids, x = wte_weight_to_fp16)[name = tensor<string, []>("x_cast_fp16")];
tensor<int32, [3]> var_16_perm_0 = const()[name = tensor<string, []>("op_16_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
tensor<int32, [1]> var_18_axes_0 = const()[name = tensor<string, []>("op_18_axes_0"), val = tensor<int32, [1]>([2])];
tensor<fp16, [1, 4096, 64]> transpose_0 = transpose(perm = var_16_perm_0, x = x_cast_fp16)[name = tensor<string, []>("transpose_0")];
tensor<fp16, [1, 4096, 1, 64]> x = expand_dims(axes = var_18_axes_0, x = transpose_0)[name = tensor<string, []>("op_18_cast_fp16")];
tensor<int32, [1]> pos_offset = sub(x = T, y = full_sequence_length)[name = tensor<string, []>("pos_offset")];
tensor<int32, [64]> var_26 = const()[name = tensor<string, []>("op_26"), val = tensor<int32, [64]>([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])];
tensor<int32, [64]> input_pos_1 = sub(x = var_26, y = pos_offset)[name = tensor<string, []>("input_pos_1")];
tensor<int32, [64]> var_34 = const()[name = tensor<string, []>("op_34"), val = tensor<int32, [64]>([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])];
tensor<int32, [64]> input_pos = maximum(x = input_pos_1, y = var_34)[name = tensor<string, []>("input_pos")];
tensor<int32, []> var_45 = const()[name = tensor<string, []>("op_45"), val = tensor<int32, []>(1)];
tensor<int32, []> var_46_batch_dims_0 = const()[name = tensor<string, []>("op_46_batch_dims_0"), val = tensor<int32, []>(0)];
tensor<fp16, [128, 512]> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, [128, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(262144128)))];
tensor<fp16, [128, 64]> cos = gather(axis = var_45, batch_dims = var_46_batch_dims_0, indices = input_pos, x = var_44_to_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
tensor<int32, []> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<int32, []>(1)];
tensor<int32, []> var_57_batch_dims_0 = const()[name = tensor<string, []>("op_57_batch_dims_0"), val = tensor<int32, []>(0)];
tensor<fp16, [128, 512]> var_55_to_fp16 = const()[name = tensor<string, []>("op_55_to_fp16"), val = tensor<fp16, [128, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(262275264)))];
tensor<fp16, [128, 64]> sin = gather(axis = var_56, batch_dims = var_57_batch_dims_0, indices = input_pos, x = var_55_to_fp16)[name = tensor<string, []>("op_57_cast_fp16")];
tensor<int32, [64, 1]> var_92 = const()[name = tensor<string, []>("op_92"), val = tensor<int32, [64, 1]>([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], [18], [19], [20], [21], [22], [23], [24], [25], [26], [27], [28], [29], [30], [31], [32], [33], [34], [35], [36], [37], [38], [39], [40], [41], [42], [43], [44], [45], [46], [47], [48], [49], [50], [51], [52], [53], [54], [55], [56], [57], [58], [59], [60], [61], [62], [63]])];
tensor<bool, [64, 1]> var_95 = less(x = var_92, y = pos_offset)[name = tensor<string, []>("op_95")];
tensor<int32, [2]> var_95_after_broadcast_reps_0 = const()[name = tensor<string, []>("op_95_after_broadcast_reps_0"), val = tensor<int32, [2]>([1, 512])];
tensor<bool, [64, 512]> var_95_after_broadcast = tile(reps = var_95_after_broadcast_reps_0, x = var_95)[name = tensor<string, []>("op_95_after_broadcast")];
tensor<fp16, [64, 512]> all_mask_to_fp16 = const()[name = tensor<string, []>("all_mask_to_fp16"), val = tensor<fp16, [64, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(262406400)))];
tensor<fp16, [64, 512]> m_1_to_fp16 = const()[name = tensor<string, []>("m_1_to_fp16"), val = tensor<fp16, [64, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(262472000)))];
tensor<fp16, [64, 512]> m_3_cast_fp16 = select(a = all_mask_to_fp16, b = m_1_to_fp16, cond = var_95_after_broadcast)[name = tensor<string, []>("m_3_cast_fp16")];
tensor<int32, [512]> var_105 = const()[name = tensor<string, []>("op_105"), val = tensor<int32, [512]>([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511])];
tensor<int32, []> var_106 = const()[name = tensor<string, []>("op_106"), val = tensor<int32, []>(512)];
tensor<int32, [1]> var_108 = sub(x = var_106, y = full_sequence_length)[name = tensor<string, []>("op_108")];
tensor<bool, [512]> var_109 = less(x = var_105, y = var_108)[name = tensor<string, []>("op_109")];
tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
tensor<bool, [1, 512]> expand_dims_0 = expand_dims(axes = expand_dims_0_axes_0, x = var_109)[name = tensor<string, []>("expand_dims_0")];
tensor<int32, [2]> var_109_after_broadcast_reps_0 = const()[name = tensor<string, []>("op_109_after_broadcast_reps_0"), val = tensor<int32, [2]>([64, 1])];
tensor<bool, [64, 512]> var_109_after_broadcast = tile(reps = var_109_after_broadcast_reps_0, x = expand_dims_0)[name = tensor<string, []>("op_109_after_broadcast")];
tensor<fp16, [64, 512]> m_cast_fp16 = select(a = all_mask_to_fp16, b = m_3_cast_fp16, cond = var_109_after_broadcast)[name = tensor<string, []>("m_cast_fp16")];
tensor<int32, [1]> var_112_axes_0 = const()[name = tensor<string, []>("op_112_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 64, 512]> var_112_cast_fp16 = expand_dims(axes = var_112_axes_0, x = m_cast_fp16)[name = tensor<string, []>("op_112_cast_fp16")];
tensor<int32, [1]> var_114_axes_0 = const()[name = tensor<string, []>("op_114_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 64, 512]> mask = expand_dims(axes = var_114_axes_0, x = var_112_cast_fp16)[name = tensor<string, []>("op_114_cast_fp16")];
} -> (x, cos, sin, mask);
}
Llama-2-7b-hf_chunk1.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75a8ba0e4d6fc824f820051588b446e6b72dfb09497a058e443ab071d9b3cbc7
size 262537600
Llama-2-7b-hf_chunk10.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3412284b024b899a736cd77112d4b1a4a5faa19d954259e925ef429f58bd886b
size 243
Llama-2-7b-hf_chunk10.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b79e263bb20b8a02d650dad2c3eee71ff787829f337aedacb6cd4e1b61c1ce23
size 791
Llama-2-7b-hf_chunk10.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,218 @@
[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 4096, 1, 64]",
        "name" : "new_x",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_k_cache_0",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_k_cache_1",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_k_cache_2",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_v_cache_0",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_v_cache_1",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_v_cache_2",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Concat" : 18,
      "Ios16.rsqrt" : 6,
      "Ios16.mul" : 63,
      "SliceByIndex" : 12,
      "Ios16.constexprLutToDense" : 21,
      "Ios16.conv" : 21,
      "Ios16.add" : 21,
      "Ios16.reduceMean" : 6,
      "Ios16.matmul" : 6,
      "Ios16.softmax" : 3,
      "Ios16.reshape" : 12,
      "Ios16.silu" : 3
    },
    "computePrecision" : "Mixed (Float16, Int32)",
    "isUpdatable" : "0",
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.source" : "torch==2.1.0",
      "com.github.apple.coremltools.version" : "7.2"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 4096, 1, 64]",
        "name" : "x",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 128 × 64)",
        "shortDescription" : "",
        "shape" : "[128, 64]",
        "name" : "cos",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 128 × 64)",
        "shortDescription" : "",
        "shape" : "[128, 64]",
        "name" : "sin",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
        "shortDescription" : "",
        "shape" : "[1, 1, 64, 512]",
        "name" : "mask",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "k_cache_0",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "v_cache_0",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "k_cache_1",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "v_cache_1",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "k_cache_2",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "v_cache_2",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk10",
    "method" : "predict"
  }
]
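Compared with chunk1, this chunk (like the other transformer chunks) takes optional k_cache_i/v_cache_i inputs of shape [1, 32, 128, 448] and returns new_k_cache_i/new_v_cache_i blocks of shape [1, 32, 128, 64], which together with the [1, 1, 64, 512] mask suggests a 512-position attention window processed 64 tokens at a time. The sketch below shows one plausible cache update consistent with those shapes; it is an assumption about how a host pipeline might feed these tensors back in, not something this commit specifies.

```python
import numpy as np

CACHE_LEN = 448   # per the k_cache_*/v_cache_* shapes above
STEP = 64         # per the new_*_cache_* shapes above

def roll_cache(cache, new_block):
    """Append this call's 64 cache positions and keep the most recent 448.

    cache is [1, 32, 128, 448] and new_block is [1, 32, 128, 64]; the
    concatenate-and-trim policy is an assumption drawn from the schema,
    and the real pipeline consuming these chunks may differ.
    """
    return np.concatenate([cache, new_block], axis=-1)[..., -CACHE_LEN:]

# e.g. feeding one call's new_k_cache_0 output back in as the next k_cache_0:
k_cache_0 = np.zeros((1, 32, 128, CACHE_LEN), dtype=np.float16)
new_k_cache_0 = np.zeros((1, 32, 128, STEP), dtype=np.float16)  # stand-in for a model output
k_cache_0 = roll_cache(k_cache_0, new_k_cache_0)
```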
Llama-2-7b-hf_chunk10.mlmodelc/model.mil
ADDED
@@ -0,0 +1,429 @@
program(1.0)
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
{
func main<ios16>(tensor<fp16, [128, 64]> cos, tensor<fp16, [1, 32, 128, 448]> k_cache_0, tensor<fp16, [1, 32, 128, 448]> k_cache_1, tensor<fp16, [1, 32, 128, 448]> k_cache_2, tensor<fp16, [1, 1, 64, 512]> mask, tensor<fp16, [128, 64]> sin, tensor<fp16, [1, 32, 128, 448]> v_cache_0, tensor<fp16, [1, 32, 128, 448]> v_cache_1, tensor<fp16, [1, 32, 128, 448]> v_cache_2, tensor<fp16, [1, 4096, 1, 64]> x) [CoreML_InputDefaultValues = dict<tensor<string, []>, tensor<fp32, []>>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"k_cache_2", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}, {"v_cache_2", 0}})] {
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388736))), name = tensor<string, []>("blocks_0_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388864))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777536))), name = tensor<string, []>("blocks_0_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777664))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166336))), name = tensor<string, []>("blocks_0_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166464))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555136))), name = tensor<string, []>("blocks_0_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555264))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099712))), name = tensor<string, []>("blocks_0_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644288))), name = tensor<string, []>("blocks_0_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644416))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188864))), name = tensor<string, []>("blocks_0_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188992))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577664))), name = tensor<string, []>("blocks_1_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577792))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966464))), name = tensor<string, []>("blocks_1_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966592))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355264))), name = tensor<string, []>("blocks_1_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355392))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744064))), name = tensor<string, []>("blocks_1_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744192))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288640))), name = tensor<string, []>("blocks_1_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288768))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833216))), name = tensor<string, []>("blocks_1_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833344))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377792))), name = tensor<string, []>("blocks_1_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377920))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766592))), name = tensor<string, []>("blocks_2_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766720))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155392))), name = tensor<string, []>("blocks_2_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155520))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544192))), name = tensor<string, []>("blocks_2_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544320))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235932992))), name = tensor<string, []>("blocks_2_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235933120))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477568))), name = tensor<string, []>("blocks_2_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477696))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022144))), name = tensor<string, []>("blocks_2_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [4096, 11008, 1, 1]> blocks_2_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022272))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566720))), name = tensor<string, []>("blocks_2_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
tensor<int32, []> var_18 = const()[name = tensor<string, []>("op_18"), val = tensor<int32, []>(3)];
tensor<int32, []> var_23 = const()[name = tensor<string, []>("op_23"), val = tensor<int32, []>(-2)];
tensor<int32, []> var_25 = const()[name = tensor<string, []>("op_25"), val = tensor<int32, []>(-1)];
tensor<int32, []> var_32 = const()[name = tensor<string, []>("op_32"), val = tensor<int32, []>(1)];
tensor<bool, []> var_33 = const()[name = tensor<string, []>("op_33"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 4096, 1, 64]> var_41_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_41_cast_fp16")];
tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_1_cast_fp16 = reduce_mean(axes = var_42, keep_dims = var_33, x = var_41_cast_fp16)[name = tensor<string, []>("norm_x_1_cast_fp16")];
tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_45_cast_fp16 = add(x = norm_x_1_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
tensor<fp16, []> var_46_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_46_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_46_cast_fp16 = rsqrt(epsilon = var_46_epsilon_0_to_fp16, x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_46_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566848)))];
tensor<fp16, [1, 4096, 1, 64]> x_5_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
tensor<int32, [2]> var_58 = const()[name = tensor<string, []>("op_58"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_60 = const()[name = tensor<string, []>("op_60"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_62_pad_type_0 = const()[name = tensor<string, []>("op_62_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_62_pad_0 = const()[name = tensor<string, []>("op_62_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_62_cast_fp16 = conv(dilations = var_60, groups = var_32, pad = var_62_pad_0, pad_type = var_62_pad_type_0, strides = var_58, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303575104)))];
tensor<fp16, [1, 4096, 1, 64]> q_1_cast_fp16 = mul(x = var_62_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_1_cast_fp16")];
tensor<int32, [2]> var_66 = const()[name = tensor<string, []>("op_66"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_68 = const()[name = tensor<string, []>("op_68"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_70_pad_type_0 = const()[name = tensor<string, []>("op_70_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_70_pad_0 = const()[name = tensor<string, []>("op_70_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_70_cast_fp16 = conv(dilations = var_68, groups = var_32, pad = var_70_pad_0, pad_type = var_70_pad_type_0, strides = var_66, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_70_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303583360)))];
tensor<fp16, [1, 4096, 1, 64]> k_1_cast_fp16 = mul(x = var_70_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_1_cast_fp16")];
tensor<int32, [2]> var_74 = const()[name = tensor<string, []>("op_74"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_76 = const()[name = tensor<string, []>("op_76"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_78_pad_type_0 = const()[name = tensor<string, []>("op_78_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_78_pad_0 = const()[name = tensor<string, []>("op_78_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_78_cast_fp16 = conv(dilations = var_76, groups = var_32, pad = var_78_pad_0, pad_type = var_78_pad_type_0, strides = var_74, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_78_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303591616)))];
tensor<fp16, [1, 4096, 1, 64]> v_1_cast_fp16 = mul(x = var_78_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_1_cast_fp16")];
tensor<int32, [4]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> q_3_cast_fp16 = reshape(shape = var_80, x = q_1_cast_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
tensor<int32, [4]> var_82 = const()[name = tensor<string, []>("op_82"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> k_3_cast_fp16 = reshape(shape = var_82, x = k_1_cast_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
tensor<int32, [4]> var_84 = const()[name = tensor<string, []>("op_84"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> new_v_cache_0 = reshape(shape = var_84, x = v_1_cast_fp16)[name = tensor<string, []>("v_3_cast_fp16")];
tensor<int32, [4]> var_96_begin_0 = const()[name = tensor<string, []>("op_96_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_96_end_0 = const()[name = tensor<string, []>("op_96_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_96_end_mask_0 = const()[name = tensor<string, []>("op_96_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_96_cast_fp16 = slice_by_index(begin = var_96_begin_0, end = var_96_end_0, end_mask = var_96_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_96_cast_fp16")];
tensor<int32, [4]> var_102_begin_0 = const()[name = tensor<string, []>("op_102_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_102_end_0 = const()[name = tensor<string, []>("op_102_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_102_end_mask_0 = const()[name = tensor<string, []>("op_102_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_102_cast_fp16 = slice_by_index(begin = var_102_begin_0, end = var_102_end_0, end_mask = var_102_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_102_cast_fp16")];
tensor<fp16, []> const_3_promoted_to_fp16 = const()[name = tensor<string, []>("const_3_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_104_cast_fp16 = mul(x = var_102_cast_fp16, y = const_3_promoted_to_fp16)[name = tensor<string, []>("op_104_cast_fp16")];
tensor<bool, []> rotated_1_interleave_0 = const()[name = tensor<string, []>("rotated_1_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_1_cast_fp16 = concat(axis = var_23, interleave = rotated_1_interleave_0, values = (var_104_cast_fp16, var_96_cast_fp16))[name = tensor<string, []>("rotated_1_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_107_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = tensor<string, []>("op_107_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_108_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = tensor<string, []>("op_108_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_1_cast_fp16 = add(x = var_107_cast_fp16, y = var_108_cast_fp16)[name = tensor<string, []>("roped_1_cast_fp16")];
tensor<int32, [4]> var_121_begin_0 = const()[name = tensor<string, []>("op_121_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_121_end_0 = const()[name = tensor<string, []>("op_121_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_121_end_mask_0 = const()[name = tensor<string, []>("op_121_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_121_cast_fp16 = slice_by_index(begin = var_121_begin_0, end = var_121_end_0, end_mask = var_121_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")];
tensor<int32, [4]> var_127_begin_0 = const()[name = tensor<string, []>("op_127_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_127_end_0 = const()[name = tensor<string, []>("op_127_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_127_end_mask_0 = const()[name = tensor<string, []>("op_127_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_127_cast_fp16 = slice_by_index(begin = var_127_begin_0, end = var_127_end_0, end_mask = var_127_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_127_cast_fp16")];
tensor<fp16, []> const_5_promoted_to_fp16 = const()[name = tensor<string, []>("const_5_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_129_cast_fp16 = mul(x = var_127_cast_fp16, y = const_5_promoted_to_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
tensor<bool, []> rotated_3_interleave_0 = const()[name = tensor<string, []>("rotated_3_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_3_cast_fp16 = concat(axis = var_23, interleave = rotated_3_interleave_0, values = (var_129_cast_fp16, var_121_cast_fp16))[name = tensor<string, []>("rotated_3_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_132_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = tensor<string, []>("op_132_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_133_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = tensor<string, []>("op_133_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_3_cast_fp16 = add(x = var_132_cast_fp16, y = var_133_cast_fp16)[name = tensor<string, []>("roped_3_cast_fp16")];
tensor<bool, []> q_5_interleave_0 = const()[name = tensor<string, []>("q_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> q_5_cast_fp16 = concat(axis = var_23, interleave = q_5_interleave_0, values = roped_1_cast_fp16)[name = tensor<string, []>("q_5_cast_fp16")];
tensor<bool, []> k_5_interleave_0 = const()[name = tensor<string, []>("k_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> new_k_cache_0 = concat(axis = var_23, interleave = k_5_interleave_0, values = roped_3_cast_fp16)[name = tensor<string, []>("k_5_cast_fp16")];
tensor<bool, []> k_7_interleave_0 = const()[name = tensor<string, []>("k_7_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_25, interleave = k_7_interleave_0, values = (k_cache_0, new_k_cache_0))[name = tensor<string, []>("k_7_cast_fp16")];
tensor<bool, []> v_5_interleave_0 = const()[name = tensor<string, []>("v_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> v_5_cast_fp16 = concat(axis = var_25, interleave = v_5_interleave_0, values = (v_cache_0, new_v_cache_0))[name = tensor<string, []>("v_5_cast_fp16")];
tensor<fp16, []> var_155_to_fp16 = const()[name = tensor<string, []>("op_155_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 64]> var_156_cast_fp16 = mul(x = q_5_cast_fp16, y = var_155_to_fp16)[name = tensor<string, []>("op_156_cast_fp16")];
tensor<bool, []> attn_weights_1_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_0"), val = tensor<bool, []>(true)];
tensor<bool, []> attn_weights_1_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 64, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_156_cast_fp16, y = k_7_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> var_164_cast_fp16 = softmax(axis = var_18, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 32, 128, 64]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = v_5_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
tensor<int32, [4]> var_168 = const()[name = tensor<string, []>("op_168"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 4096, 1, 64]> input_1_cast_fp16 = reshape(shape = var_168, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
tensor<int32, [2]> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_174 = const()[name = tensor<string, []>("op_174"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_176_pad_type_0 = const()[name = tensor<string, []>("op_176_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_176_pad_0 = const()[name = tensor<string, []>("op_176_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_176_cast_fp16 = conv(dilations = var_174, groups = var_32, pad = var_176_pad_0, pad_type = var_176_pad_type_0, strides = var_172, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303599872)))];
tensor<fp16, [1, 4096, 1, 64]> attention_output_1_cast_fp16 = mul(x = var_176_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = tensor<string, []>("x_11_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> var_185_cast_fp16 = mul(x = x_11_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("op_185_cast_fp16")];
tensor<int32, [1]> var_186 = const()[name = tensor<string, []>("op_186"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_3_cast_fp16 = reduce_mean(axes = var_186, keep_dims = var_33, x = var_185_cast_fp16)[name = tensor<string, []>("norm_x_3_cast_fp16")];
tensor<fp16, []> var_188_to_fp16 = const()[name = tensor<string, []>("op_188_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_189_cast_fp16 = add(x = norm_x_3_cast_fp16, y = var_188_to_fp16)[name = tensor<string, []>("op_189_cast_fp16")];
|
131 |
+
tensor<fp16, []> var_190_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_190_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
132 |
+
tensor<fp16, [1, 1, 1, 64]> var_190_cast_fp16 = rsqrt(epsilon = var_190_epsilon_0_to_fp16, x = var_189_cast_fp16)[name = tensor<string, []>("op_190_cast_fp16")];
|
133 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_5_cast_fp16 = mul(x = x_11_cast_fp16, y = var_190_cast_fp16)[name = tensor<string, []>("x_normed_5_cast_fp16")];
|
134 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303608128)))];
|
135 |
+
tensor<fp16, [1, 4096, 1, 64]> input_3_cast_fp16 = mul(x = x_normed_5_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
136 |
+
tensor<int32, [2]> var_202 = const()[name = tensor<string, []>("op_202"), val = tensor<int32, [2]>([1, 1])];
|
137 |
+
tensor<int32, [2]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [2]>([1, 1])];
|
138 |
+
tensor<string, []> var_206_pad_type_0 = const()[name = tensor<string, []>("op_206_pad_type_0"), val = tensor<string, []>("custom")];
|
139 |
+
tensor<int32, [4]> var_206_pad_0 = const()[name = tensor<string, []>("op_206_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
140 |
+
tensor<fp16, [1, 11008, 1, 64]> var_206_cast_fp16 = conv(dilations = var_204, groups = var_32, pad = var_206_pad_0, pad_type = var_206_pad_type_0, strides = var_202, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_206_cast_fp16")];
|
141 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303616384)))];
|
142 |
+
tensor<fp16, [1, 11008, 1, 64]> input_5_cast_fp16 = mul(x = var_206_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
143 |
+
tensor<int32, [2]> var_210 = const()[name = tensor<string, []>("op_210"), val = tensor<int32, [2]>([1, 1])];
|
144 |
+
tensor<int32, [2]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [2]>([1, 1])];
|
145 |
+
tensor<string, []> var_214_pad_type_0 = const()[name = tensor<string, []>("op_214_pad_type_0"), val = tensor<string, []>("custom")];
|
146 |
+
tensor<int32, [4]> var_214_pad_0 = const()[name = tensor<string, []>("op_214_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
147 |
+
tensor<fp16, [1, 11008, 1, 64]> var_214_cast_fp16 = conv(dilations = var_212, groups = var_32, pad = var_214_pad_0, pad_type = var_214_pad_type_0, strides = var_210, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")];
|
148 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303638464)))];
|
149 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_1_cast_fp16 = mul(x = var_214_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_1_cast_fp16")];
|
150 |
+
tensor<fp16, [1, 11008, 1, 64]> var_216_cast_fp16 = silu(x = input_5_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
|
151 |
+
tensor<fp16, [1, 11008, 1, 64]> input_7_cast_fp16 = mul(x = var_216_cast_fp16, y = x_fc_2_1_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
152 |
+
tensor<int32, [2]> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<int32, [2]>([1, 1])];
|
153 |
+
tensor<int32, [2]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [2]>([1, 1])];
|
154 |
+
tensor<string, []> var_224_pad_type_0 = const()[name = tensor<string, []>("op_224_pad_type_0"), val = tensor<string, []>("custom")];
|
155 |
+
tensor<int32, [4]> var_224_pad_0 = const()[name = tensor<string, []>("op_224_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
156 |
+
tensor<fp16, [1, 4096, 1, 64]> var_224_cast_fp16 = conv(dilations = var_222, groups = var_32, pad = var_224_pad_0, pad_type = var_224_pad_type_0, strides = var_220, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("op_224_cast_fp16")];
|
157 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303660544)))];
|
158 |
+
tensor<fp16, [1, 4096, 1, 64]> var_225_cast_fp16 = mul(x = var_224_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_225_cast_fp16")];
|
159 |
+
tensor<fp16, [1, 4096, 1, 64]> x_15_cast_fp16 = add(x = var_225_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("x_15_cast_fp16")];
|
160 |
+
tensor<int32, []> var_232 = const()[name = tensor<string, []>("op_232"), val = tensor<int32, []>(3)];
|
161 |
+
tensor<int32, []> var_237 = const()[name = tensor<string, []>("op_237"), val = tensor<int32, []>(-2)];
|
162 |
+
tensor<int32, []> var_239 = const()[name = tensor<string, []>("op_239"), val = tensor<int32, []>(-1)];
|
163 |
+
tensor<int32, []> var_246 = const()[name = tensor<string, []>("op_246"), val = tensor<int32, []>(1)];
|
164 |
+
tensor<bool, []> var_247 = const()[name = tensor<string, []>("op_247"), val = tensor<bool, []>(true)];
|
165 |
+
tensor<fp16, [1, 4096, 1, 64]> var_254_cast_fp16 = mul(x = x_15_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("op_254_cast_fp16")];
|
166 |
+
tensor<int32, [1]> var_255 = const()[name = tensor<string, []>("op_255"), val = tensor<int32, [1]>([1])];
|
167 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_5_cast_fp16 = reduce_mean(axes = var_255, keep_dims = var_247, x = var_254_cast_fp16)[name = tensor<string, []>("norm_x_5_cast_fp16")];
|
168 |
+
tensor<fp16, []> var_257_to_fp16 = const()[name = tensor<string, []>("op_257_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
169 |
+
tensor<fp16, [1, 1, 1, 64]> var_258_cast_fp16 = add(x = norm_x_5_cast_fp16, y = var_257_to_fp16)[name = tensor<string, []>("op_258_cast_fp16")];
|
170 |
+
tensor<fp16, []> var_259_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_259_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
171 |
+
tensor<fp16, [1, 1, 1, 64]> var_259_cast_fp16 = rsqrt(epsilon = var_259_epsilon_0_to_fp16, x = var_258_cast_fp16)[name = tensor<string, []>("op_259_cast_fp16")];
|
172 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_9_cast_fp16 = mul(x = x_15_cast_fp16, y = var_259_cast_fp16)[name = tensor<string, []>("x_normed_9_cast_fp16")];
|
173 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303668800)))];
|
174 |
+
tensor<fp16, [1, 4096, 1, 64]> x_19_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
|
175 |
+
tensor<int32, [2]> var_274 = const()[name = tensor<string, []>("op_274"), val = tensor<int32, [2]>([1, 1])];
|
176 |
+
tensor<int32, [2]> var_276 = const()[name = tensor<string, []>("op_276"), val = tensor<int32, [2]>([1, 1])];
|
177 |
+
tensor<string, []> var_278_pad_type_0 = const()[name = tensor<string, []>("op_278_pad_type_0"), val = tensor<string, []>("custom")];
|
178 |
+
tensor<int32, [4]> var_278_pad_0 = const()[name = tensor<string, []>("op_278_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
179 |
+
tensor<fp16, [1, 4096, 1, 64]> var_278_cast_fp16 = conv(dilations = var_276, groups = var_246, pad = var_278_pad_0, pad_type = var_278_pad_type_0, strides = var_274, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_278_cast_fp16")];
|
180 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303677056)))];
|
181 |
+
tensor<fp16, [1, 4096, 1, 64]> q_7_cast_fp16 = mul(x = var_278_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
|
182 |
+
tensor<int32, [2]> var_282 = const()[name = tensor<string, []>("op_282"), val = tensor<int32, [2]>([1, 1])];
|
183 |
+
tensor<int32, [2]> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, [2]>([1, 1])];
|
184 |
+
tensor<string, []> var_286_pad_type_0 = const()[name = tensor<string, []>("op_286_pad_type_0"), val = tensor<string, []>("custom")];
|
185 |
+
tensor<int32, [4]> var_286_pad_0 = const()[name = tensor<string, []>("op_286_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
186 |
+
tensor<fp16, [1, 4096, 1, 64]> var_286_cast_fp16 = conv(dilations = var_284, groups = var_246, pad = var_286_pad_0, pad_type = var_286_pad_type_0, strides = var_282, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_286_cast_fp16")];
|
187 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303685312)))];
|
188 |
+
tensor<fp16, [1, 4096, 1, 64]> k_9_cast_fp16 = mul(x = var_286_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_9_cast_fp16")];
|
189 |
+
tensor<int32, [2]> var_290 = const()[name = tensor<string, []>("op_290"), val = tensor<int32, [2]>([1, 1])];
|
190 |
+
tensor<int32, [2]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [2]>([1, 1])];
|
191 |
+
tensor<string, []> var_294_pad_type_0 = const()[name = tensor<string, []>("op_294_pad_type_0"), val = tensor<string, []>("custom")];
|
192 |
+
tensor<int32, [4]> var_294_pad_0 = const()[name = tensor<string, []>("op_294_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
193 |
+
tensor<fp16, [1, 4096, 1, 64]> var_294_cast_fp16 = conv(dilations = var_292, groups = var_246, pad = var_294_pad_0, pad_type = var_294_pad_type_0, strides = var_290, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_294_cast_fp16")];
|
194 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303693568)))];
|
195 |
+
tensor<fp16, [1, 4096, 1, 64]> v_7_cast_fp16 = mul(x = var_294_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_7_cast_fp16")];
|
196 |
+
tensor<int32, [4]> var_296 = const()[name = tensor<string, []>("op_296"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
197 |
+
tensor<fp16, [1, 32, 128, 64]> q_9_cast_fp16 = reshape(shape = var_296, x = q_7_cast_fp16)[name = tensor<string, []>("q_9_cast_fp16")];
|
198 |
+
tensor<int32, [4]> var_298 = const()[name = tensor<string, []>("op_298"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
199 |
+
tensor<fp16, [1, 32, 128, 64]> k_11_cast_fp16 = reshape(shape = var_298, x = k_9_cast_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
|
200 |
+
tensor<int32, [4]> var_300 = const()[name = tensor<string, []>("op_300"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
201 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_1 = reshape(shape = var_300, x = v_7_cast_fp16)[name = tensor<string, []>("v_9_cast_fp16")];
|
202 |
+
tensor<int32, [4]> var_312_begin_0 = const()[name = tensor<string, []>("op_312_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
203 |
+
tensor<int32, [4]> var_312_end_0 = const()[name = tensor<string, []>("op_312_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
204 |
+
tensor<bool, [4]> var_312_end_mask_0 = const()[name = tensor<string, []>("op_312_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
205 |
+
tensor<fp16, [1, 32, 64, 64]> var_312_cast_fp16 = slice_by_index(begin = var_312_begin_0, end = var_312_end_0, end_mask = var_312_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_312_cast_fp16")];
|
206 |
+
tensor<int32, [4]> var_318_begin_0 = const()[name = tensor<string, []>("op_318_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
207 |
+
tensor<int32, [4]> var_318_end_0 = const()[name = tensor<string, []>("op_318_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
208 |
+
tensor<bool, [4]> var_318_end_mask_0 = const()[name = tensor<string, []>("op_318_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
209 |
+
tensor<fp16, [1, 32, 64, 64]> var_318_cast_fp16 = slice_by_index(begin = var_318_begin_0, end = var_318_end_0, end_mask = var_318_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_318_cast_fp16")];
|
210 |
+
tensor<fp16, []> const_10_promoted_to_fp16 = const()[name = tensor<string, []>("const_10_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
211 |
+
tensor<fp16, [1, 32, 64, 64]> var_320_cast_fp16 = mul(x = var_318_cast_fp16, y = const_10_promoted_to_fp16)[name = tensor<string, []>("op_320_cast_fp16")];
|
212 |
+
tensor<bool, []> rotated_5_interleave_0 = const()[name = tensor<string, []>("rotated_5_interleave_0"), val = tensor<bool, []>(false)];
|
213 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_5_cast_fp16 = concat(axis = var_237, interleave = rotated_5_interleave_0, values = (var_320_cast_fp16, var_312_cast_fp16))[name = tensor<string, []>("rotated_5_cast_fp16")];
|
214 |
+
tensor<fp16, [1, 32, 128, 64]> var_323_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = tensor<string, []>("op_323_cast_fp16")];
|
215 |
+
tensor<fp16, [1, 32, 128, 64]> var_324_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = tensor<string, []>("op_324_cast_fp16")];
|
216 |
+
tensor<fp16, [1, 32, 128, 64]> roped_5_cast_fp16 = add(x = var_323_cast_fp16, y = var_324_cast_fp16)[name = tensor<string, []>("roped_5_cast_fp16")];
|
217 |
+
tensor<int32, [4]> var_337_begin_0 = const()[name = tensor<string, []>("op_337_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
218 |
+
tensor<int32, [4]> var_337_end_0 = const()[name = tensor<string, []>("op_337_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
219 |
+
tensor<bool, [4]> var_337_end_mask_0 = const()[name = tensor<string, []>("op_337_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
220 |
+
tensor<fp16, [1, 32, 64, 64]> var_337_cast_fp16 = slice_by_index(begin = var_337_begin_0, end = var_337_end_0, end_mask = var_337_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_337_cast_fp16")];
|
221 |
+
tensor<int32, [4]> var_343_begin_0 = const()[name = tensor<string, []>("op_343_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
222 |
+
tensor<int32, [4]> var_343_end_0 = const()[name = tensor<string, []>("op_343_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
223 |
+
tensor<bool, [4]> var_343_end_mask_0 = const()[name = tensor<string, []>("op_343_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
224 |
+
tensor<fp16, [1, 32, 64, 64]> var_343_cast_fp16 = slice_by_index(begin = var_343_begin_0, end = var_343_end_0, end_mask = var_343_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_343_cast_fp16")];
|
225 |
+
tensor<fp16, []> const_12_promoted_to_fp16 = const()[name = tensor<string, []>("const_12_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
226 |
+
tensor<fp16, [1, 32, 64, 64]> var_345_cast_fp16 = mul(x = var_343_cast_fp16, y = const_12_promoted_to_fp16)[name = tensor<string, []>("op_345_cast_fp16")];
|
227 |
+
tensor<bool, []> rotated_7_interleave_0 = const()[name = tensor<string, []>("rotated_7_interleave_0"), val = tensor<bool, []>(false)];
|
228 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_7_cast_fp16 = concat(axis = var_237, interleave = rotated_7_interleave_0, values = (var_345_cast_fp16, var_337_cast_fp16))[name = tensor<string, []>("rotated_7_cast_fp16")];
|
229 |
+
tensor<fp16, [1, 32, 128, 64]> var_348_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = tensor<string, []>("op_348_cast_fp16")];
|
230 |
+
tensor<fp16, [1, 32, 128, 64]> var_349_cast_fp16 = mul(x = rotated_7_cast_fp16, y = sin)[name = tensor<string, []>("op_349_cast_fp16")];
|
231 |
+
tensor<fp16, [1, 32, 128, 64]> roped_7_cast_fp16 = add(x = var_348_cast_fp16, y = var_349_cast_fp16)[name = tensor<string, []>("roped_7_cast_fp16")];
|
232 |
+
tensor<bool, []> q_11_interleave_0 = const()[name = tensor<string, []>("q_11_interleave_0"), val = tensor<bool, []>(false)];
|
233 |
+
tensor<fp16, [1, 32, 128, 64]> q_11_cast_fp16 = concat(axis = var_237, interleave = q_11_interleave_0, values = roped_5_cast_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
|
234 |
+
tensor<bool, []> k_13_interleave_0 = const()[name = tensor<string, []>("k_13_interleave_0"), val = tensor<bool, []>(false)];
|
235 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_1 = concat(axis = var_237, interleave = k_13_interleave_0, values = roped_7_cast_fp16)[name = tensor<string, []>("k_13_cast_fp16")];
|
236 |
+
tensor<bool, []> k_15_interleave_0 = const()[name = tensor<string, []>("k_15_interleave_0"), val = tensor<bool, []>(false)];
|
237 |
+
tensor<fp16, [1, 32, 128, 512]> k_15_cast_fp16 = concat(axis = var_239, interleave = k_15_interleave_0, values = (k_cache_1, new_k_cache_1))[name = tensor<string, []>("k_15_cast_fp16")];
|
238 |
+
tensor<bool, []> v_11_interleave_0 = const()[name = tensor<string, []>("v_11_interleave_0"), val = tensor<bool, []>(false)];
|
239 |
+
tensor<fp16, [1, 32, 128, 512]> v_11_cast_fp16 = concat(axis = var_239, interleave = v_11_interleave_0, values = (v_cache_1, new_v_cache_1))[name = tensor<string, []>("v_11_cast_fp16")];
|
240 |
+
tensor<fp16, []> var_371_to_fp16 = const()[name = tensor<string, []>("op_371_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
241 |
+
tensor<fp16, [1, 32, 128, 64]> var_372_cast_fp16 = mul(x = q_11_cast_fp16, y = var_371_to_fp16)[name = tensor<string, []>("op_372_cast_fp16")];
|
242 |
+
tensor<bool, []> attn_weights_5_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_x_0"), val = tensor<bool, []>(true)];
|
243 |
+
tensor<bool, []> attn_weights_5_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_y_0"), val = tensor<bool, []>(false)];
|
244 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_5_cast_fp16 = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_372_cast_fp16, y = k_15_cast_fp16)[name = tensor<string, []>("attn_weights_5_cast_fp16")];
|
245 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_7_cast_fp16 = add(x = attn_weights_5_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_7_cast_fp16")];
|
246 |
+
tensor<fp16, [1, 32, 64, 512]> var_380_cast_fp16 = softmax(axis = var_232, x = attn_weights_7_cast_fp16)[name = tensor<string, []>("op_380_cast_fp16")];
|
247 |
+
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
|
248 |
+
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
|
249 |
+
tensor<fp16, [1, 32, 128, 64]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = v_11_cast_fp16, y = var_380_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
|
250 |
+
tensor<int32, [4]> var_384 = const()[name = tensor<string, []>("op_384"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
251 |
+
tensor<fp16, [1, 4096, 1, 64]> input_9_cast_fp16 = reshape(shape = var_384, x = attn_3_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
|
252 |
+
tensor<int32, [2]> var_388 = const()[name = tensor<string, []>("op_388"), val = tensor<int32, [2]>([1, 1])];
|
253 |
+
tensor<int32, [2]> var_390 = const()[name = tensor<string, []>("op_390"), val = tensor<int32, [2]>([1, 1])];
|
254 |
+
tensor<string, []> var_392_pad_type_0 = const()[name = tensor<string, []>("op_392_pad_type_0"), val = tensor<string, []>("custom")];
|
255 |
+
tensor<int32, [4]> var_392_pad_0 = const()[name = tensor<string, []>("op_392_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
256 |
+
tensor<fp16, [1, 4096, 1, 64]> var_392_cast_fp16 = conv(dilations = var_390, groups = var_246, pad = var_392_pad_0, pad_type = var_392_pad_type_0, strides = var_388, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("op_392_cast_fp16")];
|
257 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303701824)))];
|
258 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_3_cast_fp16 = mul(x = var_392_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_3_cast_fp16")];
|
259 |
+
tensor<fp16, [1, 4096, 1, 64]> x_25_cast_fp16 = add(x = attention_output_3_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
|
260 |
+
tensor<fp16, [1, 4096, 1, 64]> var_401_cast_fp16 = mul(x = x_25_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_401_cast_fp16")];
|
261 |
+
tensor<int32, [1]> var_402 = const()[name = tensor<string, []>("op_402"), val = tensor<int32, [1]>([1])];
|
262 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_7_cast_fp16 = reduce_mean(axes = var_402, keep_dims = var_247, x = var_401_cast_fp16)[name = tensor<string, []>("norm_x_7_cast_fp16")];
|
263 |
+
tensor<fp16, []> var_404_to_fp16 = const()[name = tensor<string, []>("op_404_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
264 |
+
tensor<fp16, [1, 1, 1, 64]> var_405_cast_fp16 = add(x = norm_x_7_cast_fp16, y = var_404_to_fp16)[name = tensor<string, []>("op_405_cast_fp16")];
|
265 |
+
tensor<fp16, []> var_406_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_406_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
266 |
+
tensor<fp16, [1, 1, 1, 64]> var_406_cast_fp16 = rsqrt(epsilon = var_406_epsilon_0_to_fp16, x = var_405_cast_fp16)[name = tensor<string, []>("op_406_cast_fp16")];
|
267 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_13_cast_fp16 = mul(x = x_25_cast_fp16, y = var_406_cast_fp16)[name = tensor<string, []>("x_normed_13_cast_fp16")];
|
268 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303710080)))];
|
269 |
+
tensor<fp16, [1, 4096, 1, 64]> input_11_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
270 |
+
tensor<int32, [2]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [2]>([1, 1])];
|
271 |
+
tensor<int32, [2]> var_420 = const()[name = tensor<string, []>("op_420"), val = tensor<int32, [2]>([1, 1])];
|
272 |
+
tensor<string, []> var_422_pad_type_0 = const()[name = tensor<string, []>("op_422_pad_type_0"), val = tensor<string, []>("custom")];
|
273 |
+
tensor<int32, [4]> var_422_pad_0 = const()[name = tensor<string, []>("op_422_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
274 |
+
tensor<fp16, [1, 11008, 1, 64]> var_422_cast_fp16 = conv(dilations = var_420, groups = var_246, pad = var_422_pad_0, pad_type = var_422_pad_type_0, strides = var_418, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_422_cast_fp16")];
|
275 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303718336)))];
|
276 |
+
tensor<fp16, [1, 11008, 1, 64]> input_13_cast_fp16 = mul(x = var_422_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
277 |
+
tensor<int32, [2]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [2]>([1, 1])];
|
278 |
+
tensor<int32, [2]> var_428 = const()[name = tensor<string, []>("op_428"), val = tensor<int32, [2]>([1, 1])];
|
279 |
+
tensor<string, []> var_430_pad_type_0 = const()[name = tensor<string, []>("op_430_pad_type_0"), val = tensor<string, []>("custom")];
|
280 |
+
tensor<int32, [4]> var_430_pad_0 = const()[name = tensor<string, []>("op_430_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
281 |
+
tensor<fp16, [1, 11008, 1, 64]> var_430_cast_fp16 = conv(dilations = var_428, groups = var_246, pad = var_430_pad_0, pad_type = var_430_pad_type_0, strides = var_426, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_430_cast_fp16")];
|
282 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303740416)))];
|
283 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_3_cast_fp16 = mul(x = var_430_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_3_cast_fp16")];
|
284 |
+
tensor<fp16, [1, 11008, 1, 64]> var_432_cast_fp16 = silu(x = input_13_cast_fp16)[name = tensor<string, []>("op_432_cast_fp16")];
|
285 |
+
tensor<fp16, [1, 11008, 1, 64]> input_15_cast_fp16 = mul(x = var_432_cast_fp16, y = x_fc_2_3_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
|
286 |
+
tensor<int32, [2]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [2]>([1, 1])];
|
287 |
+
tensor<int32, [2]> var_438 = const()[name = tensor<string, []>("op_438"), val = tensor<int32, [2]>([1, 1])];
|
288 |
+
tensor<string, []> var_440_pad_type_0 = const()[name = tensor<string, []>("op_440_pad_type_0"), val = tensor<string, []>("custom")];
|
289 |
+
tensor<int32, [4]> var_440_pad_0 = const()[name = tensor<string, []>("op_440_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
290 |
+
tensor<fp16, [1, 4096, 1, 64]> var_440_cast_fp16 = conv(dilations = var_438, groups = var_246, pad = var_440_pad_0, pad_type = var_440_pad_type_0, strides = var_436, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("op_440_cast_fp16")];
|
291 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303762496)))];
|
292 |
+
tensor<fp16, [1, 4096, 1, 64]> var_441_cast_fp16 = mul(x = var_440_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
|
293 |
+
tensor<fp16, [1, 4096, 1, 64]> x_29_cast_fp16 = add(x = var_441_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
|
294 |
+
tensor<int32, []> var_448 = const()[name = tensor<string, []>("op_448"), val = tensor<int32, []>(3)];
|
295 |
+
tensor<int32, []> var_453 = const()[name = tensor<string, []>("op_453"), val = tensor<int32, []>(-2)];
|
296 |
+
tensor<int32, []> var_455 = const()[name = tensor<string, []>("op_455"), val = tensor<int32, []>(-1)];
|
297 |
+
tensor<int32, []> var_462 = const()[name = tensor<string, []>("op_462"), val = tensor<int32, []>(1)];
|
298 |
+
tensor<bool, []> var_463 = const()[name = tensor<string, []>("op_463"), val = tensor<bool, []>(true)];
|
299 |
+
tensor<fp16, [1, 4096, 1, 64]> var_470_cast_fp16 = mul(x = x_29_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("op_470_cast_fp16")];
|
300 |
+
tensor<int32, [1]> var_471 = const()[name = tensor<string, []>("op_471"), val = tensor<int32, [1]>([1])];
|
301 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_9_cast_fp16 = reduce_mean(axes = var_471, keep_dims = var_463, x = var_470_cast_fp16)[name = tensor<string, []>("norm_x_9_cast_fp16")];
|
302 |
+
tensor<fp16, []> var_473_to_fp16 = const()[name = tensor<string, []>("op_473_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
303 |
+
tensor<fp16, [1, 1, 1, 64]> var_474_cast_fp16 = add(x = norm_x_9_cast_fp16, y = var_473_to_fp16)[name = tensor<string, []>("op_474_cast_fp16")];
|
304 |
+
tensor<fp16, []> var_475_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_475_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
305 |
+
tensor<fp16, [1, 1, 1, 64]> var_475_cast_fp16 = rsqrt(epsilon = var_475_epsilon_0_to_fp16, x = var_474_cast_fp16)[name = tensor<string, []>("op_475_cast_fp16")];
|
306 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_17_cast_fp16 = mul(x = x_29_cast_fp16, y = var_475_cast_fp16)[name = tensor<string, []>("x_normed_17_cast_fp16")];
|
307 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303770752)))];
|
308 |
+
tensor<fp16, [1, 4096, 1, 64]> x_33_cast_fp16 = mul(x = x_normed_17_cast_fp16, y = blocks_2_norm_1_weight_to_fp16)[name = tensor<string, []>("x_33_cast_fp16")];
|
309 |
+
tensor<int32, [2]> var_490 = const()[name = tensor<string, []>("op_490"), val = tensor<int32, [2]>([1, 1])];
|
310 |
+
tensor<int32, [2]> var_492 = const()[name = tensor<string, []>("op_492"), val = tensor<int32, [2]>([1, 1])];
|
311 |
+
tensor<string, []> var_494_pad_type_0 = const()[name = tensor<string, []>("op_494_pad_type_0"), val = tensor<string, []>("custom")];
|
312 |
+
tensor<int32, [4]> var_494_pad_0 = const()[name = tensor<string, []>("op_494_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
313 |
+
tensor<fp16, [1, 4096, 1, 64]> var_494_cast_fp16 = conv(dilations = var_492, groups = var_462, pad = var_494_pad_0, pad_type = var_494_pad_type_0, strides = var_490, weight = blocks_2_attn_q_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_494_cast_fp16")];
|
314 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303779008)))];
|
315 |
+
tensor<fp16, [1, 4096, 1, 64]> q_13_cast_fp16 = mul(x = var_494_cast_fp16, y = blocks_2_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_13_cast_fp16")];
|
316 |
+
tensor<int32, [2]> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, [2]>([1, 1])];
|
317 |
+
tensor<int32, [2]> var_500 = const()[name = tensor<string, []>("op_500"), val = tensor<int32, [2]>([1, 1])];
|
318 |
+
tensor<string, []> var_502_pad_type_0 = const()[name = tensor<string, []>("op_502_pad_type_0"), val = tensor<string, []>("custom")];
|
319 |
+
tensor<int32, [4]> var_502_pad_0 = const()[name = tensor<string, []>("op_502_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
320 |
+
tensor<fp16, [1, 4096, 1, 64]> var_502_cast_fp16 = conv(dilations = var_500, groups = var_462, pad = var_502_pad_0, pad_type = var_502_pad_type_0, strides = var_498, weight = blocks_2_attn_k_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_502_cast_fp16")];
|
321 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303787264)))];
|
322 |
+
tensor<fp16, [1, 4096, 1, 64]> k_17_cast_fp16 = mul(x = var_502_cast_fp16, y = blocks_2_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_17_cast_fp16")];
|
323 |
+
tensor<int32, [2]> var_506 = const()[name = tensor<string, []>("op_506"), val = tensor<int32, [2]>([1, 1])];
|
324 |
+
tensor<int32, [2]> var_508 = const()[name = tensor<string, []>("op_508"), val = tensor<int32, [2]>([1, 1])];
|
325 |
+
tensor<string, []> var_510_pad_type_0 = const()[name = tensor<string, []>("op_510_pad_type_0"), val = tensor<string, []>("custom")];
|
326 |
+
tensor<int32, [4]> var_510_pad_0 = const()[name = tensor<string, []>("op_510_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
327 |
+
tensor<fp16, [1, 4096, 1, 64]> var_510_cast_fp16 = conv(dilations = var_508, groups = var_462, pad = var_510_pad_0, pad_type = var_510_pad_type_0, strides = var_506, weight = blocks_2_attn_v_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_510_cast_fp16")];
|
328 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303795520)))];
|
329 |
+
tensor<fp16, [1, 4096, 1, 64]> v_13_cast_fp16 = mul(x = var_510_cast_fp16, y = blocks_2_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_13_cast_fp16")];
|
330 |
+
tensor<int32, [4]> var_512 = const()[name = tensor<string, []>("op_512"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
331 |
+
tensor<fp16, [1, 32, 128, 64]> q_15_cast_fp16 = reshape(shape = var_512, x = q_13_cast_fp16)[name = tensor<string, []>("q_15_cast_fp16")];
|
332 |
+
tensor<int32, [4]> var_514 = const()[name = tensor<string, []>("op_514"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
333 |
+
tensor<fp16, [1, 32, 128, 64]> k_19_cast_fp16 = reshape(shape = var_514, x = k_17_cast_fp16)[name = tensor<string, []>("k_19_cast_fp16")];
|
334 |
+
tensor<int32, [4]> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
335 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_2 = reshape(shape = var_516, x = v_13_cast_fp16)[name = tensor<string, []>("v_15_cast_fp16")];
|
336 |
+
tensor<int32, [4]> var_528_begin_0 = const()[name = tensor<string, []>("op_528_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
337 |
+
tensor<int32, [4]> var_528_end_0 = const()[name = tensor<string, []>("op_528_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
338 |
+
tensor<bool, [4]> var_528_end_mask_0 = const()[name = tensor<string, []>("op_528_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
339 |
+
tensor<fp16, [1, 32, 64, 64]> var_528_cast_fp16 = slice_by_index(begin = var_528_begin_0, end = var_528_end_0, end_mask = var_528_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_528_cast_fp16")];
|
340 |
+
tensor<int32, [4]> var_534_begin_0 = const()[name = tensor<string, []>("op_534_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
341 |
+
tensor<int32, [4]> var_534_end_0 = const()[name = tensor<string, []>("op_534_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
342 |
+
tensor<bool, [4]> var_534_end_mask_0 = const()[name = tensor<string, []>("op_534_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
343 |
+
tensor<fp16, [1, 32, 64, 64]> var_534_cast_fp16 = slice_by_index(begin = var_534_begin_0, end = var_534_end_0, end_mask = var_534_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_534_cast_fp16")];
|
344 |
+
tensor<fp16, []> const_17_promoted_to_fp16 = const()[name = tensor<string, []>("const_17_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
345 |
+
tensor<fp16, [1, 32, 64, 64]> var_536_cast_fp16 = mul(x = var_534_cast_fp16, y = const_17_promoted_to_fp16)[name = tensor<string, []>("op_536_cast_fp16")];
|
346 |
+
tensor<bool, []> rotated_9_interleave_0 = const()[name = tensor<string, []>("rotated_9_interleave_0"), val = tensor<bool, []>(false)];
|
347 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_9_cast_fp16 = concat(axis = var_453, interleave = rotated_9_interleave_0, values = (var_536_cast_fp16, var_528_cast_fp16))[name = tensor<string, []>("rotated_9_cast_fp16")];
|
348 |
+
tensor<fp16, [1, 32, 128, 64]> var_539_cast_fp16 = mul(x = q_15_cast_fp16, y = cos)[name = tensor<string, []>("op_539_cast_fp16")];
|
349 |
+
tensor<fp16, [1, 32, 128, 64]> var_540_cast_fp16 = mul(x = rotated_9_cast_fp16, y = sin)[name = tensor<string, []>("op_540_cast_fp16")];
|
350 |
+
tensor<fp16, [1, 32, 128, 64]> roped_9_cast_fp16 = add(x = var_539_cast_fp16, y = var_540_cast_fp16)[name = tensor<string, []>("roped_9_cast_fp16")];
|
351 |
+
tensor<int32, [4]> var_553_begin_0 = const()[name = tensor<string, []>("op_553_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
352 |
+
tensor<int32, [4]> var_553_end_0 = const()[name = tensor<string, []>("op_553_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
353 |
+
tensor<bool, [4]> var_553_end_mask_0 = const()[name = tensor<string, []>("op_553_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
354 |
+
tensor<fp16, [1, 32, 64, 64]> var_553_cast_fp16 = slice_by_index(begin = var_553_begin_0, end = var_553_end_0, end_mask = var_553_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_553_cast_fp16")];
|
355 |
+
tensor<int32, [4]> var_559_begin_0 = const()[name = tensor<string, []>("op_559_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
356 |
+
tensor<int32, [4]> var_559_end_0 = const()[name = tensor<string, []>("op_559_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
357 |
+
tensor<bool, [4]> var_559_end_mask_0 = const()[name = tensor<string, []>("op_559_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
358 |
+
tensor<fp16, [1, 32, 64, 64]> var_559_cast_fp16 = slice_by_index(begin = var_559_begin_0, end = var_559_end_0, end_mask = var_559_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_559_cast_fp16")];
|
359 |
+
tensor<fp16, []> const_19_promoted_to_fp16 = const()[name = tensor<string, []>("const_19_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
360 |
+
tensor<fp16, [1, 32, 64, 64]> var_561_cast_fp16 = mul(x = var_559_cast_fp16, y = const_19_promoted_to_fp16)[name = tensor<string, []>("op_561_cast_fp16")];
|
361 |
+
tensor<bool, []> rotated_interleave_0 = const()[name = tensor<string, []>("rotated_interleave_0"), val = tensor<bool, []>(false)];
|
362 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_cast_fp16 = concat(axis = var_453, interleave = rotated_interleave_0, values = (var_561_cast_fp16, var_553_cast_fp16))[name = tensor<string, []>("rotated_cast_fp16")];
|
363 |
+
tensor<fp16, [1, 32, 128, 64]> var_564_cast_fp16 = mul(x = k_19_cast_fp16, y = cos)[name = tensor<string, []>("op_564_cast_fp16")];
|
364 |
+
tensor<fp16, [1, 32, 128, 64]> var_565_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = tensor<string, []>("op_565_cast_fp16")];
|
365 |
+
tensor<fp16, [1, 32, 128, 64]> roped_cast_fp16 = add(x = var_564_cast_fp16, y = var_565_cast_fp16)[name = tensor<string, []>("roped_cast_fp16")];
|
366 |
+
tensor<bool, []> q_interleave_0 = const()[name = tensor<string, []>("q_interleave_0"), val = tensor<bool, []>(false)];
|
367 |
+
tensor<fp16, [1, 32, 128, 64]> q_cast_fp16 = concat(axis = var_453, interleave = q_interleave_0, values = roped_9_cast_fp16)[name = tensor<string, []>("q_cast_fp16")];
|
368 |
+
tensor<bool, []> k_21_interleave_0 = const()[name = tensor<string, []>("k_21_interleave_0"), val = tensor<bool, []>(false)];
|
369 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_2 = concat(axis = var_453, interleave = k_21_interleave_0, values = roped_cast_fp16)[name = tensor<string, []>("k_21_cast_fp16")];
|
370 |
+
tensor<bool, []> k_interleave_0 = const()[name = tensor<string, []>("k_interleave_0"), val = tensor<bool, []>(false)];
|
371 |
+
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_455, interleave = k_interleave_0, values = (k_cache_2, new_k_cache_2))[name = tensor<string, []>("k_cast_fp16")];
|
372 |
+
tensor<bool, []> v_interleave_0 = const()[name = tensor<string, []>("v_interleave_0"), val = tensor<bool, []>(false)];
|
373 |
+
tensor<fp16, [1, 32, 128, 512]> v_cast_fp16 = concat(axis = var_455, interleave = v_interleave_0, values = (v_cache_2, new_v_cache_2))[name = tensor<string, []>("v_cast_fp16")];
|
374 |
+
tensor<fp16, []> var_587_to_fp16 = const()[name = tensor<string, []>("op_587_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
375 |
+
tensor<fp16, [1, 32, 128, 64]> var_588_cast_fp16 = mul(x = q_cast_fp16, y = var_587_to_fp16)[name = tensor<string, []>("op_588_cast_fp16")];
|
376 |
+
tensor<bool, []> attn_weights_9_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_x_0"), val = tensor<bool, []>(true)];
|
377 |
+
tensor<bool, []> attn_weights_9_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_y_0"), val = tensor<bool, []>(false)];
|
378 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_9_cast_fp16 = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_588_cast_fp16, y = k_cast_fp16)[name = tensor<string, []>("attn_weights_9_cast_fp16")];
|
379 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_cast_fp16 = add(x = attn_weights_9_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_cast_fp16")];
|
380 |
+
tensor<fp16, [1, 32, 64, 512]> var_596_cast_fp16 = softmax(axis = var_448, x = attn_weights_cast_fp16)[name = tensor<string, []>("op_596_cast_fp16")];
|
381 |
+
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
|
382 |
+
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
|
383 |
+
tensor<fp16, [1, 32, 128, 64]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = v_cast_fp16, y = var_596_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
|
384 |
+
tensor<int32, [4]> var_600 = const()[name = tensor<string, []>("op_600"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
385 |
+
tensor<fp16, [1, 4096, 1, 64]> input_17_cast_fp16 = reshape(shape = var_600, x = attn_5_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
|
386 |
+
tensor<int32, [2]> var_604 = const()[name = tensor<string, []>("op_604"), val = tensor<int32, [2]>([1, 1])];
|
387 |
+
tensor<int32, [2]> var_606 = const()[name = tensor<string, []>("op_606"), val = tensor<int32, [2]>([1, 1])];
|
388 |
+
tensor<string, []> var_608_pad_type_0 = const()[name = tensor<string, []>("op_608_pad_type_0"), val = tensor<string, []>("custom")];
|
389 |
+
tensor<int32, [4]> var_608_pad_0 = const()[name = tensor<string, []>("op_608_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
390 |
+
tensor<fp16, [1, 4096, 1, 64]> var_608_cast_fp16 = conv(dilations = var_606, groups = var_462, pad = var_608_pad_0, pad_type = var_608_pad_type_0, strides = var_604, weight = blocks_2_attn_proj_weight_palettized_cast_fp16, x = input_17_cast_fp16)[name = tensor<string, []>("op_608_cast_fp16")];
|
391 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303803776)))];
|
392 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_cast_fp16 = mul(x = var_608_cast_fp16, y = blocks_2_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_cast_fp16")];
|
393 |
+
tensor<fp16, [1, 4096, 1, 64]> x_39_cast_fp16 = add(x = attention_output_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("x_39_cast_fp16")];
|
394 |
+
tensor<fp16, [1, 4096, 1, 64]> var_617_cast_fp16 = mul(x = x_39_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")];
|
395 |
+
tensor<int32, [1]> var_618 = const()[name = tensor<string, []>("op_618"), val = tensor<int32, [1]>([1])];
|
396 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_618, keep_dims = var_463, x = var_617_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
397 |
+
tensor<fp16, []> var_620_to_fp16 = const()[name = tensor<string, []>("op_620_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
398 |
+
tensor<fp16, [1, 1, 1, 64]> var_621_cast_fp16 = add(x = norm_x_cast_fp16, y = var_620_to_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
399 |
+
tensor<fp16, []> var_622_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_622_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
400 |
+
tensor<fp16, [1, 1, 1, 64]> var_622_cast_fp16 = rsqrt(epsilon = var_622_epsilon_0_to_fp16, x = var_621_cast_fp16)[name = tensor<string, []>("op_622_cast_fp16")];
|
401 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_21_cast_fp16 = mul(x = x_39_cast_fp16, y = var_622_cast_fp16)[name = tensor<string, []>("x_normed_21_cast_fp16")];
|
402 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303812032)))];
|
403 |
+
tensor<fp16, [1, 4096, 1, 64]> input_19_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_2_norm_2_weight_to_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
404 |
+
tensor<int32, [2]> var_634 = const()[name = tensor<string, []>("op_634"), val = tensor<int32, [2]>([1, 1])];
|
405 |
+
tensor<int32, [2]> var_636 = const()[name = tensor<string, []>("op_636"), val = tensor<int32, [2]>([1, 1])];
|
406 |
+
tensor<string, []> var_638_pad_type_0 = const()[name = tensor<string, []>("op_638_pad_type_0"), val = tensor<string, []>("custom")];
|
407 |
+
tensor<int32, [4]> var_638_pad_0 = const()[name = tensor<string, []>("op_638_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
408 |
+
tensor<fp16, [1, 11008, 1, 64]> var_638_cast_fp16 = conv(dilations = var_636, groups = var_462, pad = var_638_pad_0, pad_type = var_638_pad_type_0, strides = var_634, weight = blocks_2_mlp_fc_1_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303820288)))];
|
410 |
+
tensor<fp16, [1, 11008, 1, 64]> input_21_cast_fp16 = mul(x = var_638_cast_fp16, y = blocks_2_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
|
411 |
+
tensor<int32, [2]> var_642 = const()[name = tensor<string, []>("op_642"), val = tensor<int32, [2]>([1, 1])];
|
412 |
+
tensor<int32, [2]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [2]>([1, 1])];
|
413 |
+
tensor<string, []> var_646_pad_type_0 = const()[name = tensor<string, []>("op_646_pad_type_0"), val = tensor<string, []>("custom")];
|
414 |
+
tensor<int32, [4]> var_646_pad_0 = const()[name = tensor<string, []>("op_646_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
415 |
+
tensor<fp16, [1, 11008, 1, 64]> var_646_cast_fp16 = conv(dilations = var_644, groups = var_462, pad = var_646_pad_0, pad_type = var_646_pad_type_0, strides = var_642, weight = blocks_2_mlp_fc_2_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_646_cast_fp16")];
|
416 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303842368)))];
|
417 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_cast_fp16 = mul(x = var_646_cast_fp16, y = blocks_2_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 11008, 1, 64]> var_648_cast_fp16 = silu(x = input_21_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
419 |
+
tensor<fp16, [1, 11008, 1, 64]> input_cast_fp16 = mul(x = var_648_cast_fp16, y = x_fc_2_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
420 |
+
tensor<int32, [2]> var_652 = const()[name = tensor<string, []>("op_652"), val = tensor<int32, [2]>([1, 1])];
|
421 |
+
tensor<int32, [2]> var_654 = const()[name = tensor<string, []>("op_654"), val = tensor<int32, [2]>([1, 1])];
|
422 |
+
tensor<string, []> var_656_pad_type_0 = const()[name = tensor<string, []>("op_656_pad_type_0"), val = tensor<string, []>("custom")];
|
423 |
+
tensor<int32, [4]> var_656_pad_0 = const()[name = tensor<string, []>("op_656_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
424 |
+
tensor<fp16, [1, 4096, 1, 64]> var_656_cast_fp16 = conv(dilations = var_654, groups = var_462, pad = var_656_pad_0, pad_type = var_656_pad_type_0, strides = var_652, weight = blocks_2_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")];
|
425 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303864448)))];
|
426 |
+
tensor<fp16, [1, 4096, 1, 64]> var_657_cast_fp16 = mul(x = var_656_cast_fp16, y = blocks_2_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_657_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 4096, 1, 64]> new_x = add(x = var_657_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
428 |
+
} -> (new_x, new_k_cache_0, new_k_cache_1, new_k_cache_2, new_v_cache_0, new_v_cache_1, new_v_cache_2);
|
429 |
+
}
|
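For readers tracing the ops above: each block in this MIL program is the usual rotate-half RoPE followed by masked scaled-dot-product attention over the concatenated KV cache (slice_by_index / mul / concat for the rotation, matmul + add(mask) + softmax + matmul for attention, with 0x1.6ap-4 ≈ 1/sqrt(128) as the scale). Below is a rough NumPy sketch of what those ops compute, with shapes taken from the tensor declarations above; the function names and the sketch itself are illustrative and are not part of the shipped model (the final reshape to [1, 4096, 1, 64] and the output projection conv are omitted).

import numpy as np

def rotate_half(x):
    # x: (1, 32, 128, 64). Split the head dimension (axis 2) in half, negate the
    # upper half, and put it in front of the lower half -- mirroring the
    # slice_by_index / mul(-1) / concat(axis=-2) ops in the MIL above.
    lower, upper = x[:, :, :64, :], x[:, :, 64:, :]
    return np.concatenate([-upper, lower], axis=2)

def attention_step(q, k, v, cos, sin, mask, k_cache, v_cache):
    # RoPE: elementwise cos/sin mix, as in the mul/add pairs above.
    # cos, sin: (128, 64); mask: (1, 1, 64, 512); caches: (1, 32, 128, 448).
    q = q * cos + rotate_half(q) * sin
    k = k * cos + rotate_half(k) * sin
    # Append the new keys/values to the cache along the sequence axis.
    k_all = np.concatenate([k_cache, k], axis=-1)   # (1, 32, 128, 512)
    v_all = np.concatenate([v_cache, v], axis=-1)   # (1, 32, 128, 512)
    # Scaled dot-product attention; 0x1.6ap-4 ~= 1/sqrt(128).
    scale = 1.0 / np.sqrt(128.0)
    weights = np.einsum('bhdq,bhdk->bhqk', q * scale, k_all) + mask  # (1, 32, 64, 512)
    weights = np.exp(weights - weights.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)                   # softmax over cache axis
    attn = np.einsum('bhdk,bhqk->bhdq', v_all, weights)              # (1, 32, 128, 64)
    # attn feeds the output projection; k and v are returned as the new cache slices.
    return attn, k, v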
Llama-2-7b-hf_chunk10.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:86d4446017950797cf7896941f17c78be0e7c925911e4555f70b1133d20f77b9
|
3 |
+
size 303872704
|
Llama-2-7b-hf_chunk11.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3412284b024b899a736cd77112d4b1a4a5faa19d954259e925ef429f58bd886b
|
3 |
+
size 243
|
Llama-2-7b-hf_chunk11.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:589729b2995d8ca8246bbb5d92b910207bab816ad67282b0a285bcd2de77f80e
|
3 |
+
size 791
|
Llama-2-7b-hf_chunk11.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,218 @@
1 |
+
[
|
2 |
+
{
|
3 |
+
"metadataOutputVersion" : "3.0",
|
4 |
+
"storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
|
5 |
+
"outputSchema" : [
|
6 |
+
{
|
7 |
+
"hasShapeFlexibility" : "0",
|
8 |
+
"isOptional" : "0",
|
9 |
+
"dataType" : "Float16",
|
10 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
11 |
+
"shortDescription" : "",
|
12 |
+
"shape" : "[1, 4096, 1, 64]",
|
13 |
+
"name" : "new_x",
|
14 |
+
"type" : "MultiArray"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"hasShapeFlexibility" : "0",
|
18 |
+
"isOptional" : "0",
|
19 |
+
"dataType" : "Float16",
|
20 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
21 |
+
"shortDescription" : "",
|
22 |
+
"shape" : "[1, 32, 128, 64]",
|
23 |
+
"name" : "new_k_cache_0",
|
24 |
+
"type" : "MultiArray"
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"hasShapeFlexibility" : "0",
|
28 |
+
"isOptional" : "0",
|
29 |
+
"dataType" : "Float16",
|
30 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
31 |
+
"shortDescription" : "",
|
32 |
+
"shape" : "[1, 32, 128, 64]",
|
33 |
+
"name" : "new_k_cache_1",
|
34 |
+
"type" : "MultiArray"
|
35 |
+
},
|
36 |
+
{
|
37 |
+
"hasShapeFlexibility" : "0",
|
38 |
+
"isOptional" : "0",
|
39 |
+
"dataType" : "Float16",
|
40 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
41 |
+
"shortDescription" : "",
|
42 |
+
"shape" : "[1, 32, 128, 64]",
|
43 |
+
"name" : "new_k_cache_2",
|
44 |
+
"type" : "MultiArray"
|
45 |
+
},
|
46 |
+
{
|
47 |
+
"hasShapeFlexibility" : "0",
|
48 |
+
"isOptional" : "0",
|
49 |
+
"dataType" : "Float16",
|
50 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
51 |
+
"shortDescription" : "",
|
52 |
+
"shape" : "[1, 32, 128, 64]",
|
53 |
+
"name" : "new_v_cache_0",
|
54 |
+
"type" : "MultiArray"
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"hasShapeFlexibility" : "0",
|
58 |
+
"isOptional" : "0",
|
59 |
+
"dataType" : "Float16",
|
60 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
61 |
+
"shortDescription" : "",
|
62 |
+
"shape" : "[1, 32, 128, 64]",
|
63 |
+
"name" : "new_v_cache_1",
|
64 |
+
"type" : "MultiArray"
|
65 |
+
},
|
66 |
+
{
|
67 |
+
"hasShapeFlexibility" : "0",
|
68 |
+
"isOptional" : "0",
|
69 |
+
"dataType" : "Float16",
|
70 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
71 |
+
"shortDescription" : "",
|
72 |
+
"shape" : "[1, 32, 128, 64]",
|
73 |
+
"name" : "new_v_cache_2",
|
74 |
+
"type" : "MultiArray"
|
75 |
+
}
|
76 |
+
],
|
77 |
+
"modelParameters" : [
|
78 |
+
|
79 |
+
],
|
80 |
+
"specificationVersion" : 7,
|
81 |
+
"mlProgramOperationTypeHistogram" : {
|
82 |
+
"Concat" : 18,
|
83 |
+
"Ios16.rsqrt" : 6,
|
84 |
+
"Ios16.mul" : 63,
|
85 |
+
"SliceByIndex" : 12,
|
86 |
+
"Ios16.constexprLutToDense" : 21,
|
87 |
+
"Ios16.conv" : 21,
|
88 |
+
"Ios16.add" : 21,
|
89 |
+
"Ios16.reduceMean" : 6,
|
90 |
+
"Ios16.matmul" : 6,
|
91 |
+
"Ios16.softmax" : 3,
|
92 |
+
"Ios16.reshape" : 12,
|
93 |
+
"Ios16.silu" : 3
|
94 |
+
},
|
95 |
+
"computePrecision" : "Mixed (Float16, Int32)",
|
96 |
+
"isUpdatable" : "0",
|
97 |
+
"availability" : {
|
98 |
+
"macOS" : "13.0",
|
99 |
+
"tvOS" : "16.0",
|
100 |
+
"visionOS" : "1.0",
|
101 |
+
"watchOS" : "9.0",
|
102 |
+
"iOS" : "16.0",
|
103 |
+
"macCatalyst" : "16.0"
|
104 |
+
},
|
105 |
+
"modelType" : {
|
106 |
+
"name" : "MLModelType_mlProgram"
|
107 |
+
},
|
108 |
+
"userDefinedMetadata" : {
|
109 |
+
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
110 |
+
"com.github.apple.coremltools.source" : "torch==2.1.0",
|
111 |
+
"com.github.apple.coremltools.version" : "7.2"
|
112 |
+
},
|
113 |
+
"inputSchema" : [
|
114 |
+
{
|
115 |
+
"hasShapeFlexibility" : "0",
|
116 |
+
"isOptional" : "0",
|
117 |
+
"dataType" : "Float16",
|
118 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
119 |
+
"shortDescription" : "",
|
120 |
+
"shape" : "[1, 4096, 1, 64]",
|
121 |
+
"name" : "x",
|
122 |
+
"type" : "MultiArray"
|
123 |
+
},
|
124 |
+
{
|
125 |
+
"hasShapeFlexibility" : "0",
|
126 |
+
"isOptional" : "0",
|
127 |
+
"dataType" : "Float16",
|
128 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
129 |
+
"shortDescription" : "",
|
130 |
+
"shape" : "[128, 64]",
|
131 |
+
"name" : "cos",
|
132 |
+
"type" : "MultiArray"
|
133 |
+
},
|
134 |
+
{
|
135 |
+
"hasShapeFlexibility" : "0",
|
136 |
+
"isOptional" : "0",
|
137 |
+
"dataType" : "Float16",
|
138 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
139 |
+
"shortDescription" : "",
|
140 |
+
"shape" : "[128, 64]",
|
141 |
+
"name" : "sin",
|
142 |
+
"type" : "MultiArray"
|
143 |
+
},
|
144 |
+
{
|
145 |
+
"hasShapeFlexibility" : "0",
|
146 |
+
"isOptional" : "0",
|
147 |
+
"dataType" : "Float16",
|
148 |
+
"formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
|
149 |
+
"shortDescription" : "",
|
150 |
+
"shape" : "[1, 1, 64, 512]",
|
151 |
+
"name" : "mask",
|
152 |
+
"type" : "MultiArray"
|
153 |
+
},
|
154 |
+
{
|
155 |
+
"hasShapeFlexibility" : "0",
|
156 |
+
"isOptional" : "1",
|
157 |
+
"dataType" : "Float16",
|
158 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
159 |
+
"shortDescription" : "",
|
160 |
+
"shape" : "[1, 32, 128, 448]",
|
161 |
+
"name" : "k_cache_0",
|
162 |
+
"type" : "MultiArray"
|
163 |
+
},
|
164 |
+
{
|
165 |
+
"hasShapeFlexibility" : "0",
|
166 |
+
"isOptional" : "1",
|
167 |
+
"dataType" : "Float16",
|
168 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
169 |
+
"shortDescription" : "",
|
170 |
+
"shape" : "[1, 32, 128, 448]",
|
171 |
+
"name" : "v_cache_0",
|
172 |
+
"type" : "MultiArray"
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"hasShapeFlexibility" : "0",
|
176 |
+
"isOptional" : "1",
|
177 |
+
"dataType" : "Float16",
|
178 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
179 |
+
"shortDescription" : "",
|
180 |
+
"shape" : "[1, 32, 128, 448]",
|
181 |
+
"name" : "k_cache_1",
|
182 |
+
"type" : "MultiArray"
|
183 |
+
},
|
184 |
+
{
|
185 |
+
"hasShapeFlexibility" : "0",
|
186 |
+
"isOptional" : "1",
|
187 |
+
"dataType" : "Float16",
|
188 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
189 |
+
"shortDescription" : "",
|
190 |
+
"shape" : "[1, 32, 128, 448]",
|
191 |
+
"name" : "v_cache_1",
|
192 |
+
"type" : "MultiArray"
|
193 |
+
},
|
194 |
+
{
|
195 |
+
"hasShapeFlexibility" : "0",
|
196 |
+
"isOptional" : "1",
|
197 |
+
"dataType" : "Float16",
|
198 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
199 |
+
"shortDescription" : "",
|
200 |
+
"shape" : "[1, 32, 128, 448]",
|
201 |
+
"name" : "k_cache_2",
|
202 |
+
"type" : "MultiArray"
|
203 |
+
},
|
204 |
+
{
|
205 |
+
"hasShapeFlexibility" : "0",
|
206 |
+
"isOptional" : "1",
|
207 |
+
"dataType" : "Float16",
|
208 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
209 |
+
"shortDescription" : "",
|
210 |
+
"shape" : "[1, 32, 128, 448]",
|
211 |
+
"name" : "v_cache_2",
|
212 |
+
"type" : "MultiArray"
|
213 |
+
}
|
214 |
+
],
|
215 |
+
"generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk11",
|
216 |
+
"method" : "predict"
|
217 |
+
}
|
218 |
+
]
|
Llama-2-7b-hf_chunk11.mlmodelc/model.mil
ADDED
@@ -0,0 +1,429 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [128, 64]> cos, tensor<fp16, [1, 32, 128, 448]> k_cache_0, tensor<fp16, [1, 32, 128, 448]> k_cache_1, tensor<fp16, [1, 32, 128, 448]> k_cache_2, tensor<fp16, [1, 1, 64, 512]> mask, tensor<fp16, [128, 64]> sin, tensor<fp16, [1, 32, 128, 448]> v_cache_0, tensor<fp16, [1, 32, 128, 448]> v_cache_1, tensor<fp16, [1, 32, 128, 448]> v_cache_2, tensor<fp16, [1, 4096, 1, 64]> x) [CoreML_InputDefaultValues = dict<tensor<string, []>, tensor<fp32, []>>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"k_cache_2", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}, {"v_cache_2", 0}})] {
|
5 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388736))), name = tensor<string, []>("blocks_0_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
6 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388864))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777536))), name = tensor<string, []>("blocks_0_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
7 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777664))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166336))), name = tensor<string, []>("blocks_0_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
8 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166464))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555136))), name = tensor<string, []>("blocks_0_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
9 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555264))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099712))), name = tensor<string, []>("blocks_0_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
10 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644288))), name = tensor<string, []>("blocks_0_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
11 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644416))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188864))), name = tensor<string, []>("blocks_0_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
12 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188992))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577664))), name = tensor<string, []>("blocks_1_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
13 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577792))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966464))), name = tensor<string, []>("blocks_1_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
14 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966592))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355264))), name = tensor<string, []>("blocks_1_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
15 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355392))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744064))), name = tensor<string, []>("blocks_1_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
16 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744192))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288640))), name = tensor<string, []>("blocks_1_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
17 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288768))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833216))), name = tensor<string, []>("blocks_1_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
18 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833344))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377792))), name = tensor<string, []>("blocks_1_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
19 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377920))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766592))), name = tensor<string, []>("blocks_2_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
20 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766720))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155392))), name = tensor<string, []>("blocks_2_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
21 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155520))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544192))), name = tensor<string, []>("blocks_2_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
22 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544320))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235932992))), name = tensor<string, []>("blocks_2_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
23 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235933120))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477568))), name = tensor<string, []>("blocks_2_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
24 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477696))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022144))), name = tensor<string, []>("blocks_2_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
25 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_2_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022272))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566720))), name = tensor<string, []>("blocks_2_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
26 |
+
tensor<int32, []> var_18 = const()[name = tensor<string, []>("op_18"), val = tensor<int32, []>(3)];
|
27 |
+
tensor<int32, []> var_23 = const()[name = tensor<string, []>("op_23"), val = tensor<int32, []>(-2)];
|
28 |
+
tensor<int32, []> var_25 = const()[name = tensor<string, []>("op_25"), val = tensor<int32, []>(-1)];
|
29 |
+
tensor<int32, []> var_32 = const()[name = tensor<string, []>("op_32"), val = tensor<int32, []>(1)];
|
30 |
+
tensor<bool, []> var_33 = const()[name = tensor<string, []>("op_33"), val = tensor<bool, []>(true)];
|
31 |
+
tensor<fp16, [1, 4096, 1, 64]> var_41_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_41_cast_fp16")];
|
32 |
+
tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([1])];
|
33 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_1_cast_fp16 = reduce_mean(axes = var_42, keep_dims = var_33, x = var_41_cast_fp16)[name = tensor<string, []>("norm_x_1_cast_fp16")];
|
34 |
+
tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
35 |
+
tensor<fp16, [1, 1, 1, 64]> var_45_cast_fp16 = add(x = norm_x_1_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
|
36 |
+
tensor<fp16, []> var_46_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_46_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
37 |
+
tensor<fp16, [1, 1, 1, 64]> var_46_cast_fp16 = rsqrt(epsilon = var_46_epsilon_0_to_fp16, x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
|
38 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_46_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
|
39 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566848)))];
|
40 |
+
tensor<fp16, [1, 4096, 1, 64]> x_5_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
|
41 |
+
tensor<int32, [2]> var_58 = const()[name = tensor<string, []>("op_58"), val = tensor<int32, [2]>([1, 1])];
|
42 |
+
tensor<int32, [2]> var_60 = const()[name = tensor<string, []>("op_60"), val = tensor<int32, [2]>([1, 1])];
|
43 |
+
tensor<string, []> var_62_pad_type_0 = const()[name = tensor<string, []>("op_62_pad_type_0"), val = tensor<string, []>("custom")];
|
44 |
+
tensor<int32, [4]> var_62_pad_0 = const()[name = tensor<string, []>("op_62_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
45 |
+
tensor<fp16, [1, 4096, 1, 64]> var_62_cast_fp16 = conv(dilations = var_60, groups = var_32, pad = var_62_pad_0, pad_type = var_62_pad_type_0, strides = var_58, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
|
46 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303575104)))];
|
47 |
+
tensor<fp16, [1, 4096, 1, 64]> q_1_cast_fp16 = mul(x = var_62_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_1_cast_fp16")];
|
48 |
+
tensor<int32, [2]> var_66 = const()[name = tensor<string, []>("op_66"), val = tensor<int32, [2]>([1, 1])];
|
49 |
+
tensor<int32, [2]> var_68 = const()[name = tensor<string, []>("op_68"), val = tensor<int32, [2]>([1, 1])];
|
50 |
+
tensor<string, []> var_70_pad_type_0 = const()[name = tensor<string, []>("op_70_pad_type_0"), val = tensor<string, []>("custom")];
|
51 |
+
tensor<int32, [4]> var_70_pad_0 = const()[name = tensor<string, []>("op_70_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
52 |
+
tensor<fp16, [1, 4096, 1, 64]> var_70_cast_fp16 = conv(dilations = var_68, groups = var_32, pad = var_70_pad_0, pad_type = var_70_pad_type_0, strides = var_66, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_70_cast_fp16")];
|
53 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303583360)))];
|
54 |
+
tensor<fp16, [1, 4096, 1, 64]> k_1_cast_fp16 = mul(x = var_70_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_1_cast_fp16")];
|
55 |
+
tensor<int32, [2]> var_74 = const()[name = tensor<string, []>("op_74"), val = tensor<int32, [2]>([1, 1])];
|
56 |
+
tensor<int32, [2]> var_76 = const()[name = tensor<string, []>("op_76"), val = tensor<int32, [2]>([1, 1])];
|
57 |
+
tensor<string, []> var_78_pad_type_0 = const()[name = tensor<string, []>("op_78_pad_type_0"), val = tensor<string, []>("custom")];
|
58 |
+
tensor<int32, [4]> var_78_pad_0 = const()[name = tensor<string, []>("op_78_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
59 |
+
tensor<fp16, [1, 4096, 1, 64]> var_78_cast_fp16 = conv(dilations = var_76, groups = var_32, pad = var_78_pad_0, pad_type = var_78_pad_type_0, strides = var_74, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_78_cast_fp16")];
|
60 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303591616)))];
|
61 |
+
tensor<fp16, [1, 4096, 1, 64]> v_1_cast_fp16 = mul(x = var_78_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_1_cast_fp16")];
|
62 |
+
tensor<int32, [4]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
63 |
+
tensor<fp16, [1, 32, 128, 64]> q_3_cast_fp16 = reshape(shape = var_80, x = q_1_cast_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
|
64 |
+
tensor<int32, [4]> var_82 = const()[name = tensor<string, []>("op_82"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
65 |
+
tensor<fp16, [1, 32, 128, 64]> k_3_cast_fp16 = reshape(shape = var_82, x = k_1_cast_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
|
66 |
+
tensor<int32, [4]> var_84 = const()[name = tensor<string, []>("op_84"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
67 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_0 = reshape(shape = var_84, x = v_1_cast_fp16)[name = tensor<string, []>("v_3_cast_fp16")];
|
68 |
+
tensor<int32, [4]> var_96_begin_0 = const()[name = tensor<string, []>("op_96_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
69 |
+
tensor<int32, [4]> var_96_end_0 = const()[name = tensor<string, []>("op_96_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
70 |
+
tensor<bool, [4]> var_96_end_mask_0 = const()[name = tensor<string, []>("op_96_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
71 |
+
tensor<fp16, [1, 32, 64, 64]> var_96_cast_fp16 = slice_by_index(begin = var_96_begin_0, end = var_96_end_0, end_mask = var_96_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_96_cast_fp16")];
|
72 |
+
tensor<int32, [4]> var_102_begin_0 = const()[name = tensor<string, []>("op_102_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
73 |
+
tensor<int32, [4]> var_102_end_0 = const()[name = tensor<string, []>("op_102_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
74 |
+
tensor<bool, [4]> var_102_end_mask_0 = const()[name = tensor<string, []>("op_102_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
75 |
+
tensor<fp16, [1, 32, 64, 64]> var_102_cast_fp16 = slice_by_index(begin = var_102_begin_0, end = var_102_end_0, end_mask = var_102_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_102_cast_fp16")];
|
76 |
+
tensor<fp16, []> const_3_promoted_to_fp16 = const()[name = tensor<string, []>("const_3_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
77 |
+
tensor<fp16, [1, 32, 64, 64]> var_104_cast_fp16 = mul(x = var_102_cast_fp16, y = const_3_promoted_to_fp16)[name = tensor<string, []>("op_104_cast_fp16")];
|
78 |
+
tensor<bool, []> rotated_1_interleave_0 = const()[name = tensor<string, []>("rotated_1_interleave_0"), val = tensor<bool, []>(false)];
|
79 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_1_cast_fp16 = concat(axis = var_23, interleave = rotated_1_interleave_0, values = (var_104_cast_fp16, var_96_cast_fp16))[name = tensor<string, []>("rotated_1_cast_fp16")];
|
80 |
+
tensor<fp16, [1, 32, 128, 64]> var_107_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = tensor<string, []>("op_107_cast_fp16")];
|
81 |
+
tensor<fp16, [1, 32, 128, 64]> var_108_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = tensor<string, []>("op_108_cast_fp16")];
|
82 |
+
tensor<fp16, [1, 32, 128, 64]> roped_1_cast_fp16 = add(x = var_107_cast_fp16, y = var_108_cast_fp16)[name = tensor<string, []>("roped_1_cast_fp16")];
|
83 |
+
tensor<int32, [4]> var_121_begin_0 = const()[name = tensor<string, []>("op_121_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
84 |
+
tensor<int32, [4]> var_121_end_0 = const()[name = tensor<string, []>("op_121_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
85 |
+
tensor<bool, [4]> var_121_end_mask_0 = const()[name = tensor<string, []>("op_121_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
86 |
+
tensor<fp16, [1, 32, 64, 64]> var_121_cast_fp16 = slice_by_index(begin = var_121_begin_0, end = var_121_end_0, end_mask = var_121_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")];
|
87 |
+
tensor<int32, [4]> var_127_begin_0 = const()[name = tensor<string, []>("op_127_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
88 |
+
tensor<int32, [4]> var_127_end_0 = const()[name = tensor<string, []>("op_127_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
89 |
+
tensor<bool, [4]> var_127_end_mask_0 = const()[name = tensor<string, []>("op_127_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
90 |
+
tensor<fp16, [1, 32, 64, 64]> var_127_cast_fp16 = slice_by_index(begin = var_127_begin_0, end = var_127_end_0, end_mask = var_127_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_127_cast_fp16")];
|
91 |
+
tensor<fp16, []> const_5_promoted_to_fp16 = const()[name = tensor<string, []>("const_5_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
92 |
+
tensor<fp16, [1, 32, 64, 64]> var_129_cast_fp16 = mul(x = var_127_cast_fp16, y = const_5_promoted_to_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
|
93 |
+
tensor<bool, []> rotated_3_interleave_0 = const()[name = tensor<string, []>("rotated_3_interleave_0"), val = tensor<bool, []>(false)];
|
94 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_3_cast_fp16 = concat(axis = var_23, interleave = rotated_3_interleave_0, values = (var_129_cast_fp16, var_121_cast_fp16))[name = tensor<string, []>("rotated_3_cast_fp16")];
|
95 |
+
tensor<fp16, [1, 32, 128, 64]> var_132_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = tensor<string, []>("op_132_cast_fp16")];
|
96 |
+
tensor<fp16, [1, 32, 128, 64]> var_133_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = tensor<string, []>("op_133_cast_fp16")];
|
97 |
+
tensor<fp16, [1, 32, 128, 64]> roped_3_cast_fp16 = add(x = var_132_cast_fp16, y = var_133_cast_fp16)[name = tensor<string, []>("roped_3_cast_fp16")];
|
98 |
+
tensor<bool, []> q_5_interleave_0 = const()[name = tensor<string, []>("q_5_interleave_0"), val = tensor<bool, []>(false)];
|
99 |
+
tensor<fp16, [1, 32, 128, 64]> q_5_cast_fp16 = concat(axis = var_23, interleave = q_5_interleave_0, values = roped_1_cast_fp16)[name = tensor<string, []>("q_5_cast_fp16")];
|
100 |
+
tensor<bool, []> k_5_interleave_0 = const()[name = tensor<string, []>("k_5_interleave_0"), val = tensor<bool, []>(false)];
|
101 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_0 = concat(axis = var_23, interleave = k_5_interleave_0, values = roped_3_cast_fp16)[name = tensor<string, []>("k_5_cast_fp16")];
|
102 |
+
tensor<bool, []> k_7_interleave_0 = const()[name = tensor<string, []>("k_7_interleave_0"), val = tensor<bool, []>(false)];
|
103 |
+
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_25, interleave = k_7_interleave_0, values = (k_cache_0, new_k_cache_0))[name = tensor<string, []>("k_7_cast_fp16")];
|
104 |
+
tensor<bool, []> v_5_interleave_0 = const()[name = tensor<string, []>("v_5_interleave_0"), val = tensor<bool, []>(false)];
|
105 |
+
tensor<fp16, [1, 32, 128, 512]> v_5_cast_fp16 = concat(axis = var_25, interleave = v_5_interleave_0, values = (v_cache_0, new_v_cache_0))[name = tensor<string, []>("v_5_cast_fp16")];
|
106 |
+
tensor<fp16, []> var_155_to_fp16 = const()[name = tensor<string, []>("op_155_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
107 |
+
tensor<fp16, [1, 32, 128, 64]> var_156_cast_fp16 = mul(x = q_5_cast_fp16, y = var_155_to_fp16)[name = tensor<string, []>("op_156_cast_fp16")];
|
108 |
+
tensor<bool, []> attn_weights_1_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_0"), val = tensor<bool, []>(true)];
|
109 |
+
tensor<bool, []> attn_weights_1_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_0"), val = tensor<bool, []>(false)];
|
110 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_156_cast_fp16, y = k_7_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
|
111 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
|
112 |
+
tensor<fp16, [1, 32, 64, 512]> var_164_cast_fp16 = softmax(axis = var_18, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
|
113 |
+
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
|
114 |
+
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
|
115 |
+
tensor<fp16, [1, 32, 128, 64]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = v_5_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
|
116 |
+
tensor<int32, [4]> var_168 = const()[name = tensor<string, []>("op_168"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
117 |
+
tensor<fp16, [1, 4096, 1, 64]> input_1_cast_fp16 = reshape(shape = var_168, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
|
118 |
+
tensor<int32, [2]> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, [2]>([1, 1])];
|
119 |
+
tensor<int32, [2]> var_174 = const()[name = tensor<string, []>("op_174"), val = tensor<int32, [2]>([1, 1])];
|
120 |
+
tensor<string, []> var_176_pad_type_0 = const()[name = tensor<string, []>("op_176_pad_type_0"), val = tensor<string, []>("custom")];
|
121 |
+
tensor<int32, [4]> var_176_pad_0 = const()[name = tensor<string, []>("op_176_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
122 |
+
tensor<fp16, [1, 4096, 1, 64]> var_176_cast_fp16 = conv(dilations = var_174, groups = var_32, pad = var_176_pad_0, pad_type = var_176_pad_type_0, strides = var_172, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
|
123 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303599872)))];
|
124 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_1_cast_fp16 = mul(x = var_176_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_1_cast_fp16")];
|
125 |
+
tensor<fp16, [1, 4096, 1, 64]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = tensor<string, []>("x_11_cast_fp16")];
|
126 |
+
tensor<fp16, [1, 4096, 1, 64]> var_185_cast_fp16 = mul(x = x_11_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("op_185_cast_fp16")];
|
127 |
+
tensor<int32, [1]> var_186 = const()[name = tensor<string, []>("op_186"), val = tensor<int32, [1]>([1])];
|
128 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_3_cast_fp16 = reduce_mean(axes = var_186, keep_dims = var_33, x = var_185_cast_fp16)[name = tensor<string, []>("norm_x_3_cast_fp16")];
|
129 |
+
tensor<fp16, []> var_188_to_fp16 = const()[name = tensor<string, []>("op_188_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
130 |
+
tensor<fp16, [1, 1, 1, 64]> var_189_cast_fp16 = add(x = norm_x_3_cast_fp16, y = var_188_to_fp16)[name = tensor<string, []>("op_189_cast_fp16")];
|
131 |
+
tensor<fp16, []> var_190_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_190_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
132 |
+
tensor<fp16, [1, 1, 1, 64]> var_190_cast_fp16 = rsqrt(epsilon = var_190_epsilon_0_to_fp16, x = var_189_cast_fp16)[name = tensor<string, []>("op_190_cast_fp16")];
|
133 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_5_cast_fp16 = mul(x = x_11_cast_fp16, y = var_190_cast_fp16)[name = tensor<string, []>("x_normed_5_cast_fp16")];
|
134 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303608128)))];
|
135 |
+
tensor<fp16, [1, 4096, 1, 64]> input_3_cast_fp16 = mul(x = x_normed_5_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
136 |
+
tensor<int32, [2]> var_202 = const()[name = tensor<string, []>("op_202"), val = tensor<int32, [2]>([1, 1])];
|
137 |
+
tensor<int32, [2]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [2]>([1, 1])];
|
138 |
+
tensor<string, []> var_206_pad_type_0 = const()[name = tensor<string, []>("op_206_pad_type_0"), val = tensor<string, []>("custom")];
|
139 |
+
tensor<int32, [4]> var_206_pad_0 = const()[name = tensor<string, []>("op_206_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
140 |
+
tensor<fp16, [1, 11008, 1, 64]> var_206_cast_fp16 = conv(dilations = var_204, groups = var_32, pad = var_206_pad_0, pad_type = var_206_pad_type_0, strides = var_202, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_206_cast_fp16")];
|
141 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303616384)))];
|
142 |
+
tensor<fp16, [1, 11008, 1, 64]> input_5_cast_fp16 = mul(x = var_206_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
143 |
+
tensor<int32, [2]> var_210 = const()[name = tensor<string, []>("op_210"), val = tensor<int32, [2]>([1, 1])];
|
144 |
+
tensor<int32, [2]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [2]>([1, 1])];
|
145 |
+
tensor<string, []> var_214_pad_type_0 = const()[name = tensor<string, []>("op_214_pad_type_0"), val = tensor<string, []>("custom")];
|
146 |
+
tensor<int32, [4]> var_214_pad_0 = const()[name = tensor<string, []>("op_214_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
147 |
+
tensor<fp16, [1, 11008, 1, 64]> var_214_cast_fp16 = conv(dilations = var_212, groups = var_32, pad = var_214_pad_0, pad_type = var_214_pad_type_0, strides = var_210, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")];
|
148 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303638464)))];
|
149 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_1_cast_fp16 = mul(x = var_214_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_1_cast_fp16")];
|
150 |
+
tensor<fp16, [1, 11008, 1, 64]> var_216_cast_fp16 = silu(x = input_5_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
|
151 |
+
tensor<fp16, [1, 11008, 1, 64]> input_7_cast_fp16 = mul(x = var_216_cast_fp16, y = x_fc_2_1_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
152 |
+
tensor<int32, [2]> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<int32, [2]>([1, 1])];
|
153 |
+
tensor<int32, [2]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [2]>([1, 1])];
|
154 |
+
tensor<string, []> var_224_pad_type_0 = const()[name = tensor<string, []>("op_224_pad_type_0"), val = tensor<string, []>("custom")];
|
155 |
+
tensor<int32, [4]> var_224_pad_0 = const()[name = tensor<string, []>("op_224_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
156 |
+
tensor<fp16, [1, 4096, 1, 64]> var_224_cast_fp16 = conv(dilations = var_222, groups = var_32, pad = var_224_pad_0, pad_type = var_224_pad_type_0, strides = var_220, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("op_224_cast_fp16")];
|
157 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303660544)))];
|
158 |
+
tensor<fp16, [1, 4096, 1, 64]> var_225_cast_fp16 = mul(x = var_224_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_225_cast_fp16")];
|
159 |
+
tensor<fp16, [1, 4096, 1, 64]> x_15_cast_fp16 = add(x = var_225_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("x_15_cast_fp16")];
|
160 |
+
tensor<int32, []> var_232 = const()[name = tensor<string, []>("op_232"), val = tensor<int32, []>(3)];
|
161 |
+
tensor<int32, []> var_237 = const()[name = tensor<string, []>("op_237"), val = tensor<int32, []>(-2)];
|
162 |
+
tensor<int32, []> var_239 = const()[name = tensor<string, []>("op_239"), val = tensor<int32, []>(-1)];
|
163 |
+
tensor<int32, []> var_246 = const()[name = tensor<string, []>("op_246"), val = tensor<int32, []>(1)];
|
164 |
+
tensor<bool, []> var_247 = const()[name = tensor<string, []>("op_247"), val = tensor<bool, []>(true)];
|
165 |
+
tensor<fp16, [1, 4096, 1, 64]> var_254_cast_fp16 = mul(x = x_15_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("op_254_cast_fp16")];
|
166 |
+
tensor<int32, [1]> var_255 = const()[name = tensor<string, []>("op_255"), val = tensor<int32, [1]>([1])];
|
167 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_5_cast_fp16 = reduce_mean(axes = var_255, keep_dims = var_247, x = var_254_cast_fp16)[name = tensor<string, []>("norm_x_5_cast_fp16")];
|
168 |
+
tensor<fp16, []> var_257_to_fp16 = const()[name = tensor<string, []>("op_257_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
169 |
+
tensor<fp16, [1, 1, 1, 64]> var_258_cast_fp16 = add(x = norm_x_5_cast_fp16, y = var_257_to_fp16)[name = tensor<string, []>("op_258_cast_fp16")];
|
170 |
+
tensor<fp16, []> var_259_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_259_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
171 |
+
tensor<fp16, [1, 1, 1, 64]> var_259_cast_fp16 = rsqrt(epsilon = var_259_epsilon_0_to_fp16, x = var_258_cast_fp16)[name = tensor<string, []>("op_259_cast_fp16")];
|
172 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_9_cast_fp16 = mul(x = x_15_cast_fp16, y = var_259_cast_fp16)[name = tensor<string, []>("x_normed_9_cast_fp16")];
|
173 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303668800)))];
|
174 |
+
tensor<fp16, [1, 4096, 1, 64]> x_19_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
|
175 |
+
tensor<int32, [2]> var_274 = const()[name = tensor<string, []>("op_274"), val = tensor<int32, [2]>([1, 1])];
|
176 |
+
tensor<int32, [2]> var_276 = const()[name = tensor<string, []>("op_276"), val = tensor<int32, [2]>([1, 1])];
|
177 |
+
tensor<string, []> var_278_pad_type_0 = const()[name = tensor<string, []>("op_278_pad_type_0"), val = tensor<string, []>("custom")];
|
178 |
+
tensor<int32, [4]> var_278_pad_0 = const()[name = tensor<string, []>("op_278_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
179 |
+
tensor<fp16, [1, 4096, 1, 64]> var_278_cast_fp16 = conv(dilations = var_276, groups = var_246, pad = var_278_pad_0, pad_type = var_278_pad_type_0, strides = var_274, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_278_cast_fp16")];
|
180 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303677056)))];
|
181 |
+
tensor<fp16, [1, 4096, 1, 64]> q_7_cast_fp16 = mul(x = var_278_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
|
182 |
+
tensor<int32, [2]> var_282 = const()[name = tensor<string, []>("op_282"), val = tensor<int32, [2]>([1, 1])];
|
183 |
+
tensor<int32, [2]> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, [2]>([1, 1])];
|
184 |
+
tensor<string, []> var_286_pad_type_0 = const()[name = tensor<string, []>("op_286_pad_type_0"), val = tensor<string, []>("custom")];
|
185 |
+
tensor<int32, [4]> var_286_pad_0 = const()[name = tensor<string, []>("op_286_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
186 |
+
tensor<fp16, [1, 4096, 1, 64]> var_286_cast_fp16 = conv(dilations = var_284, groups = var_246, pad = var_286_pad_0, pad_type = var_286_pad_type_0, strides = var_282, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_286_cast_fp16")];
|
187 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303685312)))];
|
188 |
+
tensor<fp16, [1, 4096, 1, 64]> k_9_cast_fp16 = mul(x = var_286_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_9_cast_fp16")];
|
189 |
+
tensor<int32, [2]> var_290 = const()[name = tensor<string, []>("op_290"), val = tensor<int32, [2]>([1, 1])];
|
190 |
+
tensor<int32, [2]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [2]>([1, 1])];
|
191 |
+
tensor<string, []> var_294_pad_type_0 = const()[name = tensor<string, []>("op_294_pad_type_0"), val = tensor<string, []>("custom")];
|
192 |
+
tensor<int32, [4]> var_294_pad_0 = const()[name = tensor<string, []>("op_294_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
193 |
+
tensor<fp16, [1, 4096, 1, 64]> var_294_cast_fp16 = conv(dilations = var_292, groups = var_246, pad = var_294_pad_0, pad_type = var_294_pad_type_0, strides = var_290, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_294_cast_fp16")];
|
194 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303693568)))];
|
195 |
+
tensor<fp16, [1, 4096, 1, 64]> v_7_cast_fp16 = mul(x = var_294_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_7_cast_fp16")];
|
196 |
+
tensor<int32, [4]> var_296 = const()[name = tensor<string, []>("op_296"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
197 |
+
tensor<fp16, [1, 32, 128, 64]> q_9_cast_fp16 = reshape(shape = var_296, x = q_7_cast_fp16)[name = tensor<string, []>("q_9_cast_fp16")];
|
198 |
+
tensor<int32, [4]> var_298 = const()[name = tensor<string, []>("op_298"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
199 |
+
tensor<fp16, [1, 32, 128, 64]> k_11_cast_fp16 = reshape(shape = var_298, x = k_9_cast_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
|
200 |
+
tensor<int32, [4]> var_300 = const()[name = tensor<string, []>("op_300"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
201 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_1 = reshape(shape = var_300, x = v_7_cast_fp16)[name = tensor<string, []>("v_9_cast_fp16")];
|
202 |
+
tensor<int32, [4]> var_312_begin_0 = const()[name = tensor<string, []>("op_312_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
203 |
+
tensor<int32, [4]> var_312_end_0 = const()[name = tensor<string, []>("op_312_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
204 |
+
tensor<bool, [4]> var_312_end_mask_0 = const()[name = tensor<string, []>("op_312_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
205 |
+
tensor<fp16, [1, 32, 64, 64]> var_312_cast_fp16 = slice_by_index(begin = var_312_begin_0, end = var_312_end_0, end_mask = var_312_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_312_cast_fp16")];
|
206 |
+
tensor<int32, [4]> var_318_begin_0 = const()[name = tensor<string, []>("op_318_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
207 |
+
tensor<int32, [4]> var_318_end_0 = const()[name = tensor<string, []>("op_318_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
208 |
+
tensor<bool, [4]> var_318_end_mask_0 = const()[name = tensor<string, []>("op_318_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
209 |
+
tensor<fp16, [1, 32, 64, 64]> var_318_cast_fp16 = slice_by_index(begin = var_318_begin_0, end = var_318_end_0, end_mask = var_318_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_318_cast_fp16")];
|
210 |
+
tensor<fp16, []> const_10_promoted_to_fp16 = const()[name = tensor<string, []>("const_10_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
211 |
+
tensor<fp16, [1, 32, 64, 64]> var_320_cast_fp16 = mul(x = var_318_cast_fp16, y = const_10_promoted_to_fp16)[name = tensor<string, []>("op_320_cast_fp16")];
|
212 |
+
tensor<bool, []> rotated_5_interleave_0 = const()[name = tensor<string, []>("rotated_5_interleave_0"), val = tensor<bool, []>(false)];
|
213 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_5_cast_fp16 = concat(axis = var_237, interleave = rotated_5_interleave_0, values = (var_320_cast_fp16, var_312_cast_fp16))[name = tensor<string, []>("rotated_5_cast_fp16")];
|
214 |
+
tensor<fp16, [1, 32, 128, 64]> var_323_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = tensor<string, []>("op_323_cast_fp16")];
|
215 |
+
tensor<fp16, [1, 32, 128, 64]> var_324_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = tensor<string, []>("op_324_cast_fp16")];
|
216 |
+
tensor<fp16, [1, 32, 128, 64]> roped_5_cast_fp16 = add(x = var_323_cast_fp16, y = var_324_cast_fp16)[name = tensor<string, []>("roped_5_cast_fp16")];
|
217 |
+
tensor<int32, [4]> var_337_begin_0 = const()[name = tensor<string, []>("op_337_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
218 |
+
tensor<int32, [4]> var_337_end_0 = const()[name = tensor<string, []>("op_337_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
219 |
+
tensor<bool, [4]> var_337_end_mask_0 = const()[name = tensor<string, []>("op_337_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
220 |
+
tensor<fp16, [1, 32, 64, 64]> var_337_cast_fp16 = slice_by_index(begin = var_337_begin_0, end = var_337_end_0, end_mask = var_337_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_337_cast_fp16")];
|
221 |
+
tensor<int32, [4]> var_343_begin_0 = const()[name = tensor<string, []>("op_343_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
222 |
+
tensor<int32, [4]> var_343_end_0 = const()[name = tensor<string, []>("op_343_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
223 |
+
tensor<bool, [4]> var_343_end_mask_0 = const()[name = tensor<string, []>("op_343_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
224 |
+
tensor<fp16, [1, 32, 64, 64]> var_343_cast_fp16 = slice_by_index(begin = var_343_begin_0, end = var_343_end_0, end_mask = var_343_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_343_cast_fp16")];
|
225 |
+
tensor<fp16, []> const_12_promoted_to_fp16 = const()[name = tensor<string, []>("const_12_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
226 |
+
tensor<fp16, [1, 32, 64, 64]> var_345_cast_fp16 = mul(x = var_343_cast_fp16, y = const_12_promoted_to_fp16)[name = tensor<string, []>("op_345_cast_fp16")];
|
227 |
+
tensor<bool, []> rotated_7_interleave_0 = const()[name = tensor<string, []>("rotated_7_interleave_0"), val = tensor<bool, []>(false)];
|
228 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_7_cast_fp16 = concat(axis = var_237, interleave = rotated_7_interleave_0, values = (var_345_cast_fp16, var_337_cast_fp16))[name = tensor<string, []>("rotated_7_cast_fp16")];
|
229 |
+
tensor<fp16, [1, 32, 128, 64]> var_348_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = tensor<string, []>("op_348_cast_fp16")];
|
230 |
+
tensor<fp16, [1, 32, 128, 64]> var_349_cast_fp16 = mul(x = rotated_7_cast_fp16, y = sin)[name = tensor<string, []>("op_349_cast_fp16")];
|
231 |
+
tensor<fp16, [1, 32, 128, 64]> roped_7_cast_fp16 = add(x = var_348_cast_fp16, y = var_349_cast_fp16)[name = tensor<string, []>("roped_7_cast_fp16")];
|
232 |
+
tensor<bool, []> q_11_interleave_0 = const()[name = tensor<string, []>("q_11_interleave_0"), val = tensor<bool, []>(false)];
|
233 |
+
tensor<fp16, [1, 32, 128, 64]> q_11_cast_fp16 = concat(axis = var_237, interleave = q_11_interleave_0, values = roped_5_cast_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
|
234 |
+
tensor<bool, []> k_13_interleave_0 = const()[name = tensor<string, []>("k_13_interleave_0"), val = tensor<bool, []>(false)];
|
235 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_1 = concat(axis = var_237, interleave = k_13_interleave_0, values = roped_7_cast_fp16)[name = tensor<string, []>("k_13_cast_fp16")];
|
236 |
+
tensor<bool, []> k_15_interleave_0 = const()[name = tensor<string, []>("k_15_interleave_0"), val = tensor<bool, []>(false)];
|
237 |
+
tensor<fp16, [1, 32, 128, 512]> k_15_cast_fp16 = concat(axis = var_239, interleave = k_15_interleave_0, values = (k_cache_1, new_k_cache_1))[name = tensor<string, []>("k_15_cast_fp16")];
|
238 |
+
tensor<bool, []> v_11_interleave_0 = const()[name = tensor<string, []>("v_11_interleave_0"), val = tensor<bool, []>(false)];
|
239 |
+
tensor<fp16, [1, 32, 128, 512]> v_11_cast_fp16 = concat(axis = var_239, interleave = v_11_interleave_0, values = (v_cache_1, new_v_cache_1))[name = tensor<string, []>("v_11_cast_fp16")];
|
240 |
+
tensor<fp16, []> var_371_to_fp16 = const()[name = tensor<string, []>("op_371_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
241 |
+
tensor<fp16, [1, 32, 128, 64]> var_372_cast_fp16 = mul(x = q_11_cast_fp16, y = var_371_to_fp16)[name = tensor<string, []>("op_372_cast_fp16")];
|
242 |
+
tensor<bool, []> attn_weights_5_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_x_0"), val = tensor<bool, []>(true)];
|
243 |
+
tensor<bool, []> attn_weights_5_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_y_0"), val = tensor<bool, []>(false)];
|
244 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_5_cast_fp16 = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_372_cast_fp16, y = k_15_cast_fp16)[name = tensor<string, []>("attn_weights_5_cast_fp16")];
|
245 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_7_cast_fp16 = add(x = attn_weights_5_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_7_cast_fp16")];
|
246 |
+
tensor<fp16, [1, 32, 64, 512]> var_380_cast_fp16 = softmax(axis = var_232, x = attn_weights_7_cast_fp16)[name = tensor<string, []>("op_380_cast_fp16")];
|
247 |
+
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
|
248 |
+
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
|
249 |
+
tensor<fp16, [1, 32, 128, 64]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = v_11_cast_fp16, y = var_380_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
|
250 |
+
tensor<int32, [4]> var_384 = const()[name = tensor<string, []>("op_384"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
251 |
+
tensor<fp16, [1, 4096, 1, 64]> input_9_cast_fp16 = reshape(shape = var_384, x = attn_3_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
|
252 |
+
tensor<int32, [2]> var_388 = const()[name = tensor<string, []>("op_388"), val = tensor<int32, [2]>([1, 1])];
|
253 |
+
tensor<int32, [2]> var_390 = const()[name = tensor<string, []>("op_390"), val = tensor<int32, [2]>([1, 1])];
|
254 |
+
tensor<string, []> var_392_pad_type_0 = const()[name = tensor<string, []>("op_392_pad_type_0"), val = tensor<string, []>("custom")];
|
255 |
+
tensor<int32, [4]> var_392_pad_0 = const()[name = tensor<string, []>("op_392_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
256 |
+
tensor<fp16, [1, 4096, 1, 64]> var_392_cast_fp16 = conv(dilations = var_390, groups = var_246, pad = var_392_pad_0, pad_type = var_392_pad_type_0, strides = var_388, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("op_392_cast_fp16")];
|
257 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303701824)))];
|
258 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_3_cast_fp16 = mul(x = var_392_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_3_cast_fp16")];
|
259 |
+
tensor<fp16, [1, 4096, 1, 64]> x_25_cast_fp16 = add(x = attention_output_3_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
|
260 |
+
tensor<fp16, [1, 4096, 1, 64]> var_401_cast_fp16 = mul(x = x_25_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_401_cast_fp16")];
|
261 |
+
tensor<int32, [1]> var_402 = const()[name = tensor<string, []>("op_402"), val = tensor<int32, [1]>([1])];
|
262 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_7_cast_fp16 = reduce_mean(axes = var_402, keep_dims = var_247, x = var_401_cast_fp16)[name = tensor<string, []>("norm_x_7_cast_fp16")];
|
263 |
+
tensor<fp16, []> var_404_to_fp16 = const()[name = tensor<string, []>("op_404_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
264 |
+
tensor<fp16, [1, 1, 1, 64]> var_405_cast_fp16 = add(x = norm_x_7_cast_fp16, y = var_404_to_fp16)[name = tensor<string, []>("op_405_cast_fp16")];
|
265 |
+
tensor<fp16, []> var_406_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_406_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
266 |
+
tensor<fp16, [1, 1, 1, 64]> var_406_cast_fp16 = rsqrt(epsilon = var_406_epsilon_0_to_fp16, x = var_405_cast_fp16)[name = tensor<string, []>("op_406_cast_fp16")];
|
267 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_13_cast_fp16 = mul(x = x_25_cast_fp16, y = var_406_cast_fp16)[name = tensor<string, []>("x_normed_13_cast_fp16")];
|
268 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303710080)))];
|
269 |
+
tensor<fp16, [1, 4096, 1, 64]> input_11_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
270 |
+
tensor<int32, [2]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [2]>([1, 1])];
|
271 |
+
tensor<int32, [2]> var_420 = const()[name = tensor<string, []>("op_420"), val = tensor<int32, [2]>([1, 1])];
|
272 |
+
tensor<string, []> var_422_pad_type_0 = const()[name = tensor<string, []>("op_422_pad_type_0"), val = tensor<string, []>("custom")];
|
273 |
+
tensor<int32, [4]> var_422_pad_0 = const()[name = tensor<string, []>("op_422_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
274 |
+
tensor<fp16, [1, 11008, 1, 64]> var_422_cast_fp16 = conv(dilations = var_420, groups = var_246, pad = var_422_pad_0, pad_type = var_422_pad_type_0, strides = var_418, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_422_cast_fp16")];
|
275 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303718336)))];
|
276 |
+
tensor<fp16, [1, 11008, 1, 64]> input_13_cast_fp16 = mul(x = var_422_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
277 |
+
tensor<int32, [2]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [2]>([1, 1])];
|
278 |
+
tensor<int32, [2]> var_428 = const()[name = tensor<string, []>("op_428"), val = tensor<int32, [2]>([1, 1])];
|
279 |
+
tensor<string, []> var_430_pad_type_0 = const()[name = tensor<string, []>("op_430_pad_type_0"), val = tensor<string, []>("custom")];
|
280 |
+
tensor<int32, [4]> var_430_pad_0 = const()[name = tensor<string, []>("op_430_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
281 |
+
tensor<fp16, [1, 11008, 1, 64]> var_430_cast_fp16 = conv(dilations = var_428, groups = var_246, pad = var_430_pad_0, pad_type = var_430_pad_type_0, strides = var_426, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_430_cast_fp16")];
|
282 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303740416)))];
|
283 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_3_cast_fp16 = mul(x = var_430_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_3_cast_fp16")];
|
284 |
+
tensor<fp16, [1, 11008, 1, 64]> var_432_cast_fp16 = silu(x = input_13_cast_fp16)[name = tensor<string, []>("op_432_cast_fp16")];
|
285 |
+
tensor<fp16, [1, 11008, 1, 64]> input_15_cast_fp16 = mul(x = var_432_cast_fp16, y = x_fc_2_3_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
|
286 |
+
tensor<int32, [2]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [2]>([1, 1])];
|
287 |
+
tensor<int32, [2]> var_438 = const()[name = tensor<string, []>("op_438"), val = tensor<int32, [2]>([1, 1])];
|
288 |
+
tensor<string, []> var_440_pad_type_0 = const()[name = tensor<string, []>("op_440_pad_type_0"), val = tensor<string, []>("custom")];
|
289 |
+
tensor<int32, [4]> var_440_pad_0 = const()[name = tensor<string, []>("op_440_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
290 |
+
tensor<fp16, [1, 4096, 1, 64]> var_440_cast_fp16 = conv(dilations = var_438, groups = var_246, pad = var_440_pad_0, pad_type = var_440_pad_type_0, strides = var_436, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("op_440_cast_fp16")];
|
291 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303762496)))];
|
292 |
+
tensor<fp16, [1, 4096, 1, 64]> var_441_cast_fp16 = mul(x = var_440_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
|
293 |
+
tensor<fp16, [1, 4096, 1, 64]> x_29_cast_fp16 = add(x = var_441_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
|
294 |
+
tensor<int32, []> var_448 = const()[name = tensor<string, []>("op_448"), val = tensor<int32, []>(3)];
|
295 |
+
tensor<int32, []> var_453 = const()[name = tensor<string, []>("op_453"), val = tensor<int32, []>(-2)];
|
296 |
+
tensor<int32, []> var_455 = const()[name = tensor<string, []>("op_455"), val = tensor<int32, []>(-1)];
|
297 |
+
tensor<int32, []> var_462 = const()[name = tensor<string, []>("op_462"), val = tensor<int32, []>(1)];
|
298 |
+
tensor<bool, []> var_463 = const()[name = tensor<string, []>("op_463"), val = tensor<bool, []>(true)];
|
299 |
+
tensor<fp16, [1, 4096, 1, 64]> var_470_cast_fp16 = mul(x = x_29_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("op_470_cast_fp16")];
|
300 |
+
tensor<int32, [1]> var_471 = const()[name = tensor<string, []>("op_471"), val = tensor<int32, [1]>([1])];
|
301 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_9_cast_fp16 = reduce_mean(axes = var_471, keep_dims = var_463, x = var_470_cast_fp16)[name = tensor<string, []>("norm_x_9_cast_fp16")];
|
302 |
+
tensor<fp16, []> var_473_to_fp16 = const()[name = tensor<string, []>("op_473_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
303 |
+
tensor<fp16, [1, 1, 1, 64]> var_474_cast_fp16 = add(x = norm_x_9_cast_fp16, y = var_473_to_fp16)[name = tensor<string, []>("op_474_cast_fp16")];
|
304 |
+
tensor<fp16, []> var_475_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_475_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
305 |
+
tensor<fp16, [1, 1, 1, 64]> var_475_cast_fp16 = rsqrt(epsilon = var_475_epsilon_0_to_fp16, x = var_474_cast_fp16)[name = tensor<string, []>("op_475_cast_fp16")];
|
306 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_17_cast_fp16 = mul(x = x_29_cast_fp16, y = var_475_cast_fp16)[name = tensor<string, []>("x_normed_17_cast_fp16")];
|
307 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303770752)))];
|
308 |
+
tensor<fp16, [1, 4096, 1, 64]> x_33_cast_fp16 = mul(x = x_normed_17_cast_fp16, y = blocks_2_norm_1_weight_to_fp16)[name = tensor<string, []>("x_33_cast_fp16")];
|
309 |
+
tensor<int32, [2]> var_490 = const()[name = tensor<string, []>("op_490"), val = tensor<int32, [2]>([1, 1])];
|
310 |
+
tensor<int32, [2]> var_492 = const()[name = tensor<string, []>("op_492"), val = tensor<int32, [2]>([1, 1])];
|
311 |
+
tensor<string, []> var_494_pad_type_0 = const()[name = tensor<string, []>("op_494_pad_type_0"), val = tensor<string, []>("custom")];
|
312 |
+
tensor<int32, [4]> var_494_pad_0 = const()[name = tensor<string, []>("op_494_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
313 |
+
tensor<fp16, [1, 4096, 1, 64]> var_494_cast_fp16 = conv(dilations = var_492, groups = var_462, pad = var_494_pad_0, pad_type = var_494_pad_type_0, strides = var_490, weight = blocks_2_attn_q_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_494_cast_fp16")];
|
314 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303779008)))];
|
315 |
+
tensor<fp16, [1, 4096, 1, 64]> q_13_cast_fp16 = mul(x = var_494_cast_fp16, y = blocks_2_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_13_cast_fp16")];
|
316 |
+
tensor<int32, [2]> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, [2]>([1, 1])];
|
317 |
+
tensor<int32, [2]> var_500 = const()[name = tensor<string, []>("op_500"), val = tensor<int32, [2]>([1, 1])];
|
318 |
+
tensor<string, []> var_502_pad_type_0 = const()[name = tensor<string, []>("op_502_pad_type_0"), val = tensor<string, []>("custom")];
|
319 |
+
tensor<int32, [4]> var_502_pad_0 = const()[name = tensor<string, []>("op_502_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
320 |
+
tensor<fp16, [1, 4096, 1, 64]> var_502_cast_fp16 = conv(dilations = var_500, groups = var_462, pad = var_502_pad_0, pad_type = var_502_pad_type_0, strides = var_498, weight = blocks_2_attn_k_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_502_cast_fp16")];
|
321 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303787264)))];
|
322 |
+
tensor<fp16, [1, 4096, 1, 64]> k_17_cast_fp16 = mul(x = var_502_cast_fp16, y = blocks_2_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_17_cast_fp16")];
|
323 |
+
tensor<int32, [2]> var_506 = const()[name = tensor<string, []>("op_506"), val = tensor<int32, [2]>([1, 1])];
|
324 |
+
tensor<int32, [2]> var_508 = const()[name = tensor<string, []>("op_508"), val = tensor<int32, [2]>([1, 1])];
|
325 |
+
tensor<string, []> var_510_pad_type_0 = const()[name = tensor<string, []>("op_510_pad_type_0"), val = tensor<string, []>("custom")];
|
326 |
+
tensor<int32, [4]> var_510_pad_0 = const()[name = tensor<string, []>("op_510_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
327 |
+
tensor<fp16, [1, 4096, 1, 64]> var_510_cast_fp16 = conv(dilations = var_508, groups = var_462, pad = var_510_pad_0, pad_type = var_510_pad_type_0, strides = var_506, weight = blocks_2_attn_v_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_510_cast_fp16")];
|
328 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303795520)))];
|
329 |
+
tensor<fp16, [1, 4096, 1, 64]> v_13_cast_fp16 = mul(x = var_510_cast_fp16, y = blocks_2_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_13_cast_fp16")];
|
330 |
+
tensor<int32, [4]> var_512 = const()[name = tensor<string, []>("op_512"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
331 |
+
tensor<fp16, [1, 32, 128, 64]> q_15_cast_fp16 = reshape(shape = var_512, x = q_13_cast_fp16)[name = tensor<string, []>("q_15_cast_fp16")];
|
332 |
+
tensor<int32, [4]> var_514 = const()[name = tensor<string, []>("op_514"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
333 |
+
tensor<fp16, [1, 32, 128, 64]> k_19_cast_fp16 = reshape(shape = var_514, x = k_17_cast_fp16)[name = tensor<string, []>("k_19_cast_fp16")];
|
334 |
+
tensor<int32, [4]> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
335 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_2 = reshape(shape = var_516, x = v_13_cast_fp16)[name = tensor<string, []>("v_15_cast_fp16")];
|
336 |
+
tensor<int32, [4]> var_528_begin_0 = const()[name = tensor<string, []>("op_528_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
337 |
+
tensor<int32, [4]> var_528_end_0 = const()[name = tensor<string, []>("op_528_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
338 |
+
tensor<bool, [4]> var_528_end_mask_0 = const()[name = tensor<string, []>("op_528_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
339 |
+
tensor<fp16, [1, 32, 64, 64]> var_528_cast_fp16 = slice_by_index(begin = var_528_begin_0, end = var_528_end_0, end_mask = var_528_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_528_cast_fp16")];
|
340 |
+
tensor<int32, [4]> var_534_begin_0 = const()[name = tensor<string, []>("op_534_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
341 |
+
tensor<int32, [4]> var_534_end_0 = const()[name = tensor<string, []>("op_534_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
342 |
+
tensor<bool, [4]> var_534_end_mask_0 = const()[name = tensor<string, []>("op_534_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
343 |
+
tensor<fp16, [1, 32, 64, 64]> var_534_cast_fp16 = slice_by_index(begin = var_534_begin_0, end = var_534_end_0, end_mask = var_534_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_534_cast_fp16")];
|
344 |
+
tensor<fp16, []> const_17_promoted_to_fp16 = const()[name = tensor<string, []>("const_17_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
345 |
+
tensor<fp16, [1, 32, 64, 64]> var_536_cast_fp16 = mul(x = var_534_cast_fp16, y = const_17_promoted_to_fp16)[name = tensor<string, []>("op_536_cast_fp16")];
|
346 |
+
tensor<bool, []> rotated_9_interleave_0 = const()[name = tensor<string, []>("rotated_9_interleave_0"), val = tensor<bool, []>(false)];
|
347 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_9_cast_fp16 = concat(axis = var_453, interleave = rotated_9_interleave_0, values = (var_536_cast_fp16, var_528_cast_fp16))[name = tensor<string, []>("rotated_9_cast_fp16")];
|
348 |
+
tensor<fp16, [1, 32, 128, 64]> var_539_cast_fp16 = mul(x = q_15_cast_fp16, y = cos)[name = tensor<string, []>("op_539_cast_fp16")];
|
349 |
+
tensor<fp16, [1, 32, 128, 64]> var_540_cast_fp16 = mul(x = rotated_9_cast_fp16, y = sin)[name = tensor<string, []>("op_540_cast_fp16")];
|
350 |
+
tensor<fp16, [1, 32, 128, 64]> roped_9_cast_fp16 = add(x = var_539_cast_fp16, y = var_540_cast_fp16)[name = tensor<string, []>("roped_9_cast_fp16")];
|
351 |
+
tensor<int32, [4]> var_553_begin_0 = const()[name = tensor<string, []>("op_553_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
352 |
+
tensor<int32, [4]> var_553_end_0 = const()[name = tensor<string, []>("op_553_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
353 |
+
tensor<bool, [4]> var_553_end_mask_0 = const()[name = tensor<string, []>("op_553_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
354 |
+
tensor<fp16, [1, 32, 64, 64]> var_553_cast_fp16 = slice_by_index(begin = var_553_begin_0, end = var_553_end_0, end_mask = var_553_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_553_cast_fp16")];
|
355 |
+
tensor<int32, [4]> var_559_begin_0 = const()[name = tensor<string, []>("op_559_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
356 |
+
tensor<int32, [4]> var_559_end_0 = const()[name = tensor<string, []>("op_559_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
357 |
+
tensor<bool, [4]> var_559_end_mask_0 = const()[name = tensor<string, []>("op_559_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
358 |
+
tensor<fp16, [1, 32, 64, 64]> var_559_cast_fp16 = slice_by_index(begin = var_559_begin_0, end = var_559_end_0, end_mask = var_559_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_559_cast_fp16")];
|
359 |
+
tensor<fp16, []> const_19_promoted_to_fp16 = const()[name = tensor<string, []>("const_19_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
360 |
+
tensor<fp16, [1, 32, 64, 64]> var_561_cast_fp16 = mul(x = var_559_cast_fp16, y = const_19_promoted_to_fp16)[name = tensor<string, []>("op_561_cast_fp16")];
|
361 |
+
tensor<bool, []> rotated_interleave_0 = const()[name = tensor<string, []>("rotated_interleave_0"), val = tensor<bool, []>(false)];
|
362 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_cast_fp16 = concat(axis = var_453, interleave = rotated_interleave_0, values = (var_561_cast_fp16, var_553_cast_fp16))[name = tensor<string, []>("rotated_cast_fp16")];
|
363 |
+
tensor<fp16, [1, 32, 128, 64]> var_564_cast_fp16 = mul(x = k_19_cast_fp16, y = cos)[name = tensor<string, []>("op_564_cast_fp16")];
|
364 |
+
tensor<fp16, [1, 32, 128, 64]> var_565_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = tensor<string, []>("op_565_cast_fp16")];
|
365 |
+
tensor<fp16, [1, 32, 128, 64]> roped_cast_fp16 = add(x = var_564_cast_fp16, y = var_565_cast_fp16)[name = tensor<string, []>("roped_cast_fp16")];
|
366 |
+
tensor<bool, []> q_interleave_0 = const()[name = tensor<string, []>("q_interleave_0"), val = tensor<bool, []>(false)];
|
367 |
+
tensor<fp16, [1, 32, 128, 64]> q_cast_fp16 = concat(axis = var_453, interleave = q_interleave_0, values = roped_9_cast_fp16)[name = tensor<string, []>("q_cast_fp16")];
|
368 |
+
tensor<bool, []> k_21_interleave_0 = const()[name = tensor<string, []>("k_21_interleave_0"), val = tensor<bool, []>(false)];
|
369 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_2 = concat(axis = var_453, interleave = k_21_interleave_0, values = roped_cast_fp16)[name = tensor<string, []>("k_21_cast_fp16")];
|
370 |
+
tensor<bool, []> k_interleave_0 = const()[name = tensor<string, []>("k_interleave_0"), val = tensor<bool, []>(false)];
|
371 |
+
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_455, interleave = k_interleave_0, values = (k_cache_2, new_k_cache_2))[name = tensor<string, []>("k_cast_fp16")];
|
372 |
+
tensor<bool, []> v_interleave_0 = const()[name = tensor<string, []>("v_interleave_0"), val = tensor<bool, []>(false)];
|
373 |
+
tensor<fp16, [1, 32, 128, 512]> v_cast_fp16 = concat(axis = var_455, interleave = v_interleave_0, values = (v_cache_2, new_v_cache_2))[name = tensor<string, []>("v_cast_fp16")];
|
374 |
+
tensor<fp16, []> var_587_to_fp16 = const()[name = tensor<string, []>("op_587_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
375 |
+
tensor<fp16, [1, 32, 128, 64]> var_588_cast_fp16 = mul(x = q_cast_fp16, y = var_587_to_fp16)[name = tensor<string, []>("op_588_cast_fp16")];
|
376 |
+
tensor<bool, []> attn_weights_9_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_x_0"), val = tensor<bool, []>(true)];
|
377 |
+
tensor<bool, []> attn_weights_9_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_y_0"), val = tensor<bool, []>(false)];
|
378 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_9_cast_fp16 = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_588_cast_fp16, y = k_cast_fp16)[name = tensor<string, []>("attn_weights_9_cast_fp16")];
|
379 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_cast_fp16 = add(x = attn_weights_9_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_cast_fp16")];
|
380 |
+
tensor<fp16, [1, 32, 64, 512]> var_596_cast_fp16 = softmax(axis = var_448, x = attn_weights_cast_fp16)[name = tensor<string, []>("op_596_cast_fp16")];
|
381 |
+
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
|
382 |
+
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
|
383 |
+
tensor<fp16, [1, 32, 128, 64]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = v_cast_fp16, y = var_596_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
|
384 |
+
tensor<int32, [4]> var_600 = const()[name = tensor<string, []>("op_600"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
385 |
+
tensor<fp16, [1, 4096, 1, 64]> input_17_cast_fp16 = reshape(shape = var_600, x = attn_5_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
|
386 |
+
tensor<int32, [2]> var_604 = const()[name = tensor<string, []>("op_604"), val = tensor<int32, [2]>([1, 1])];
|
387 |
+
tensor<int32, [2]> var_606 = const()[name = tensor<string, []>("op_606"), val = tensor<int32, [2]>([1, 1])];
|
388 |
+
tensor<string, []> var_608_pad_type_0 = const()[name = tensor<string, []>("op_608_pad_type_0"), val = tensor<string, []>("custom")];
|
389 |
+
tensor<int32, [4]> var_608_pad_0 = const()[name = tensor<string, []>("op_608_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
390 |
+
tensor<fp16, [1, 4096, 1, 64]> var_608_cast_fp16 = conv(dilations = var_606, groups = var_462, pad = var_608_pad_0, pad_type = var_608_pad_type_0, strides = var_604, weight = blocks_2_attn_proj_weight_palettized_cast_fp16, x = input_17_cast_fp16)[name = tensor<string, []>("op_608_cast_fp16")];
|
391 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303803776)))];
|
392 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_cast_fp16 = mul(x = var_608_cast_fp16, y = blocks_2_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_cast_fp16")];
|
393 |
+
tensor<fp16, [1, 4096, 1, 64]> x_39_cast_fp16 = add(x = attention_output_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("x_39_cast_fp16")];
|
394 |
+
tensor<fp16, [1, 4096, 1, 64]> var_617_cast_fp16 = mul(x = x_39_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")];
|
395 |
+
tensor<int32, [1]> var_618 = const()[name = tensor<string, []>("op_618"), val = tensor<int32, [1]>([1])];
|
396 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_618, keep_dims = var_463, x = var_617_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
397 |
+
tensor<fp16, []> var_620_to_fp16 = const()[name = tensor<string, []>("op_620_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
398 |
+
tensor<fp16, [1, 1, 1, 64]> var_621_cast_fp16 = add(x = norm_x_cast_fp16, y = var_620_to_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
399 |
+
tensor<fp16, []> var_622_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_622_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
400 |
+
tensor<fp16, [1, 1, 1, 64]> var_622_cast_fp16 = rsqrt(epsilon = var_622_epsilon_0_to_fp16, x = var_621_cast_fp16)[name = tensor<string, []>("op_622_cast_fp16")];
|
401 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_21_cast_fp16 = mul(x = x_39_cast_fp16, y = var_622_cast_fp16)[name = tensor<string, []>("x_normed_21_cast_fp16")];
|
402 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303812032)))];
|
403 |
+
tensor<fp16, [1, 4096, 1, 64]> input_19_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_2_norm_2_weight_to_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
404 |
+
tensor<int32, [2]> var_634 = const()[name = tensor<string, []>("op_634"), val = tensor<int32, [2]>([1, 1])];
|
405 |
+
tensor<int32, [2]> var_636 = const()[name = tensor<string, []>("op_636"), val = tensor<int32, [2]>([1, 1])];
|
406 |
+
tensor<string, []> var_638_pad_type_0 = const()[name = tensor<string, []>("op_638_pad_type_0"), val = tensor<string, []>("custom")];
|
407 |
+
tensor<int32, [4]> var_638_pad_0 = const()[name = tensor<string, []>("op_638_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
408 |
+
tensor<fp16, [1, 11008, 1, 64]> var_638_cast_fp16 = conv(dilations = var_636, groups = var_462, pad = var_638_pad_0, pad_type = var_638_pad_type_0, strides = var_634, weight = blocks_2_mlp_fc_1_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303820288)))];
|
410 |
+
tensor<fp16, [1, 11008, 1, 64]> input_21_cast_fp16 = mul(x = var_638_cast_fp16, y = blocks_2_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
|
411 |
+
tensor<int32, [2]> var_642 = const()[name = tensor<string, []>("op_642"), val = tensor<int32, [2]>([1, 1])];
|
412 |
+
tensor<int32, [2]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [2]>([1, 1])];
|
413 |
+
tensor<string, []> var_646_pad_type_0 = const()[name = tensor<string, []>("op_646_pad_type_0"), val = tensor<string, []>("custom")];
|
414 |
+
tensor<int32, [4]> var_646_pad_0 = const()[name = tensor<string, []>("op_646_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
415 |
+
tensor<fp16, [1, 11008, 1, 64]> var_646_cast_fp16 = conv(dilations = var_644, groups = var_462, pad = var_646_pad_0, pad_type = var_646_pad_type_0, strides = var_642, weight = blocks_2_mlp_fc_2_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_646_cast_fp16")];
|
416 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303842368)))];
|
417 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_cast_fp16 = mul(x = var_646_cast_fp16, y = blocks_2_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 11008, 1, 64]> var_648_cast_fp16 = silu(x = input_21_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
419 |
+
tensor<fp16, [1, 11008, 1, 64]> input_cast_fp16 = mul(x = var_648_cast_fp16, y = x_fc_2_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
420 |
+
tensor<int32, [2]> var_652 = const()[name = tensor<string, []>("op_652"), val = tensor<int32, [2]>([1, 1])];
|
421 |
+
tensor<int32, [2]> var_654 = const()[name = tensor<string, []>("op_654"), val = tensor<int32, [2]>([1, 1])];
|
422 |
+
tensor<string, []> var_656_pad_type_0 = const()[name = tensor<string, []>("op_656_pad_type_0"), val = tensor<string, []>("custom")];
|
423 |
+
tensor<int32, [4]> var_656_pad_0 = const()[name = tensor<string, []>("op_656_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
424 |
+
tensor<fp16, [1, 4096, 1, 64]> var_656_cast_fp16 = conv(dilations = var_654, groups = var_462, pad = var_656_pad_0, pad_type = var_656_pad_type_0, strides = var_652, weight = blocks_2_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")];
|
425 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303864448)))];
|
426 |
+
tensor<fp16, [1, 4096, 1, 64]> var_657_cast_fp16 = mul(x = var_656_cast_fp16, y = blocks_2_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_657_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 4096, 1, 64]> new_x = add(x = var_657_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
428 |
+
} -> (new_x, new_k_cache_0, new_k_cache_1, new_k_cache_2, new_v_cache_0, new_v_cache_1, new_v_cache_2);
|
429 |
+
}
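For orientation, the MIL above is the standard pre-norm Llama decoder block expressed as 1×1 convolutions with per-channel output scales, explicit rotate-half RoPE, a concatenated 512-slot K/V window, and a SwiGLU MLP. The NumPy sketch below is illustrative only (it is not part of the shipped model, and the function names are invented here); it mirrors the rotation and masked-attention ops in the same [batch, heads, head_dim, seq] layout, with head_dim = 128, 64 query positions, and a 512-entry key/value window.

```python
import numpy as np

def softmax(x, axis=-1):
    # Numerically stable softmax, matching the MIL softmax over the key axis.
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def rope_rotate_half(x, cos, sin):
    # Mirrors the slice / negate / concat pattern (op_528, op_534, rotated_9 above):
    # split head_dim (axis -2) into halves, build (-upper, lower), then combine
    # with the cos/sin tables, which broadcast over batch and heads.
    lower, upper = x[:, :, :64, :], x[:, :, 64:, :]
    rotated = np.concatenate([-upper, lower], axis=-2)
    return x * cos + rotated * sin

def cached_attention(q, k_new, v_new, k_cache, v_cache, mask):
    # Concatenate the rolling cache with the freshly projected keys/values
    # (k_cast_fp16 / v_cast_fp16 above), then run the matmul / softmax / matmul
    # triple with the additive mask.
    k = np.concatenate([k_cache, k_new], axis=-1)                 # [1, 32, 128, 512]
    v = np.concatenate([v_cache, v_new], axis=-1)                 # [1, 32, 128, 512]
    scale = 1.0 / np.sqrt(128.0)                                  # the 0x1.6ap-4 constant
    scores = np.einsum('bhdq,bhdk->bhqk', q * scale, k) + mask    # [1, 32, 64, 512]
    weights = softmax(scores, axis=-1)
    return np.einsum('bhdk,bhqk->bhdq', v, weights)               # back to [1, 32, 128, 64]
```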
Llama-2-7b-hf_chunk11.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9506f3438a1c857418b2dd28a4631b401f24e3bd606f0427c7adbf510af1e2dc
+size 303872704
Llama-2-7b-hf_chunk12.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a73e9cc1e9aaa1351af7ee9af6a10c0d8fd805fe2383635cee1714240351b5c2
+size 243
Llama-2-7b-hf_chunk12.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e030d81f910b53587cf130f1dba0c1d731ab715ebd6ca0b4f475da21707b21e
+size 651
Llama-2-7b-hf_chunk12.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,178 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
+        "shortDescription" : "",
+        "shape" : "[1, 4096, 1, 64]",
+        "name" : "new_x",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
+        "shortDescription" : "",
+        "shape" : "[1, 32, 128, 64]",
+        "name" : "new_k_cache_0",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
+        "shortDescription" : "",
+        "shape" : "[1, 32, 128, 64]",
+        "name" : "new_k_cache_1",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
+        "shortDescription" : "",
+        "shape" : "[1, 32, 128, 64]",
+        "name" : "new_v_cache_0",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
+        "shortDescription" : "",
+        "shape" : "[1, 32, 128, 64]",
+        "name" : "new_v_cache_1",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Concat" : 12,
+      "Ios16.rsqrt" : 4,
+      "Ios16.mul" : 42,
+      "SliceByIndex" : 8,
+      "Ios16.constexprLutToDense" : 14,
+      "Ios16.conv" : 14,
+      "Ios16.add" : 14,
+      "Ios16.reduceMean" : 4,
+      "Ios16.matmul" : 4,
+      "Ios16.softmax" : 2,
+      "Ios16.reshape" : 8,
+      "Ios16.silu" : 2
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.1.0",
+      "com.github.apple.coremltools.version" : "7.2"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
+        "shortDescription" : "",
+        "shape" : "[1, 4096, 1, 64]",
+        "name" : "x",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 128 × 64)",
+        "shortDescription" : "",
+        "shape" : "[128, 64]",
+        "name" : "cos",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 128 × 64)",
+        "shortDescription" : "",
+        "shape" : "[128, 64]",
+        "name" : "sin",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 64, 512]",
+        "name" : "mask",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "1",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
+        "shortDescription" : "",
+        "shape" : "[1, 32, 128, 448]",
+        "name" : "k_cache_0",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "1",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
+        "shortDescription" : "",
+        "shape" : "[1, 32, 128, 448]",
+        "name" : "v_cache_0",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "1",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
+        "shortDescription" : "",
+        "shape" : "[1, 32, 128, 448]",
+        "name" : "k_cache_1",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "1",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
+        "shortDescription" : "",
+        "shape" : "[1, 32, 128, 448]",
+        "name" : "v_cache_1",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk12",
+    "method" : "predict"
+  }
+]
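Given the schema above, a single decoding step on one chunk looks roughly like the sketch below. It is a hypothetical driver, assuming coremltools' CompiledMLModel loader for .mlmodelc bundles and zero-filled placeholder inputs; a real pipeline supplies genuine hidden states, RoPE tables, and a causal additive mask, and rolls the 448-token k_cache_*/v_cache_* windows forward between calls.

```python
import numpy as np
import coremltools as ct  # assumption: coremltools >= 7 with CompiledMLModel support

# Load the compiled chunk directly; .mlmodelc bundles need no recompilation.
chunk = ct.models.CompiledMLModel("Llama-2-7b-hf_chunk12.mlmodelc")

# Placeholder inputs with the shapes declared in metadata.json above.
inputs = {
    "x":    np.zeros((1, 4096, 1, 64), dtype=np.float16),  # hidden states for 64 positions
    "cos":  np.zeros((128, 64),        dtype=np.float16),  # RoPE cosine table
    "sin":  np.zeros((128, 64),        dtype=np.float16),  # RoPE sine table
    "mask": np.zeros((1, 1, 64, 512),  dtype=np.float16),  # additive attention mask
    # k_cache_0 / v_cache_0 / k_cache_1 / v_cache_1 are optional and default to zeros.
}

outputs = chunk.predict(inputs)
x_next = outputs["new_x"]           # hidden state handed to the next chunk
k0 = outputs["new_k_cache_0"]       # cache slices the caller appends to its
v0 = outputs["new_v_cache_0"]       # per-layer 448-slot windows
```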
Llama-2-7b-hf_chunk12.mlmodelc/model.mil
ADDED
@@ -0,0 +1,288 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [128, 64]> cos, tensor<fp16, [1, 32, 128, 448]> k_cache_0, tensor<fp16, [1, 32, 128, 448]> k_cache_1, tensor<fp16, [1, 1, 64, 512]> mask, tensor<fp16, [128, 64]> sin, tensor<fp16, [1, 32, 128, 448]> v_cache_0, tensor<fp16, [1, 32, 128, 448]> v_cache_1, tensor<fp16, [1, 4096, 1, 64]> x) [CoreML_InputDefaultValues = dict<tensor<string, []>, tensor<fp32, []>>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}})] {
|
5 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388736))), name = tensor<string, []>("blocks_0_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
6 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388864))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777536))), name = tensor<string, []>("blocks_0_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
7 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777664))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166336))), name = tensor<string, []>("blocks_0_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
8 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166464))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555136))), name = tensor<string, []>("blocks_0_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
9 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555264))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099712))), name = tensor<string, []>("blocks_0_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
10 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644288))), name = tensor<string, []>("blocks_0_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
11 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644416))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188864))), name = tensor<string, []>("blocks_0_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
12 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188992))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577664))), name = tensor<string, []>("blocks_1_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
13 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577792))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966464))), name = tensor<string, []>("blocks_1_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
14 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966592))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355264))), name = tensor<string, []>("blocks_1_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
15 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355392))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744064))), name = tensor<string, []>("blocks_1_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
16 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744192))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288640))), name = tensor<string, []>("blocks_1_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
17 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288768))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833216))), name = tensor<string, []>("blocks_1_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
18 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833344))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377792))), name = tensor<string, []>("blocks_1_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
19 |
+
tensor<int32, []> var_14 = const()[name = tensor<string, []>("op_14"), val = tensor<int32, []>(3)];
|
20 |
+
tensor<int32, []> var_19 = const()[name = tensor<string, []>("op_19"), val = tensor<int32, []>(-2)];
|
21 |
+
tensor<int32, []> var_21 = const()[name = tensor<string, []>("op_21"), val = tensor<int32, []>(-1)];
|
22 |
+
tensor<int32, []> var_28 = const()[name = tensor<string, []>("op_28"), val = tensor<int32, []>(1)];
|
23 |
+
tensor<bool, []> var_29 = const()[name = tensor<string, []>("op_29"), val = tensor<bool, []>(true)];
|
24 |
+
tensor<fp16, [1, 4096, 1, 64]> var_37_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_37_cast_fp16")];
|
25 |
+
tensor<int32, [1]> var_38 = const()[name = tensor<string, []>("op_38"), val = tensor<int32, [1]>([1])];
|
26 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_1_cast_fp16 = reduce_mean(axes = var_38, keep_dims = var_29, x = var_37_cast_fp16)[name = tensor<string, []>("norm_x_1_cast_fp16")];
|
27 |
+
tensor<fp16, []> var_40_to_fp16 = const()[name = tensor<string, []>("op_40_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
28 |
+
tensor<fp16, [1, 1, 1, 64]> var_41_cast_fp16 = add(x = norm_x_1_cast_fp16, y = var_40_to_fp16)[name = tensor<string, []>("op_41_cast_fp16")];
|
29 |
+
tensor<fp16, []> var_42_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_42_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
30 |
+
tensor<fp16, [1, 1, 1, 64]> var_42_cast_fp16 = rsqrt(epsilon = var_42_epsilon_0_to_fp16, x = var_41_cast_fp16)[name = tensor<string, []>("op_42_cast_fp16")];
|
31 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_42_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
|
32 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377920)))];
|
33 |
+
tensor<fp16, [1, 4096, 1, 64]> x_5_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
|
34 |
+
tensor<int32, [2]> var_54 = const()[name = tensor<string, []>("op_54"), val = tensor<int32, [2]>([1, 1])];
|
35 |
+
tensor<int32, [2]> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<int32, [2]>([1, 1])];
|
36 |
+
tensor<string, []> var_58_pad_type_0 = const()[name = tensor<string, []>("op_58_pad_type_0"), val = tensor<string, []>("custom")];
|
37 |
+
tensor<int32, [4]> var_58_pad_0 = const()[name = tensor<string, []>("op_58_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
38 |
+
tensor<fp16, [1, 4096, 1, 64]> var_58_cast_fp16 = conv(dilations = var_56, groups = var_28, pad = var_58_pad_0, pad_type = var_58_pad_type_0, strides = var_54, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_58_cast_fp16")];
|
39 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202386176)))];
|
40 |
+
tensor<fp16, [1, 4096, 1, 64]> q_1_cast_fp16 = mul(x = var_58_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_1_cast_fp16")];
|
41 |
+
tensor<int32, [2]> var_62 = const()[name = tensor<string, []>("op_62"), val = tensor<int32, [2]>([1, 1])];
|
42 |
+
tensor<int32, [2]> var_64 = const()[name = tensor<string, []>("op_64"), val = tensor<int32, [2]>([1, 1])];
|
43 |
+
tensor<string, []> var_66_pad_type_0 = const()[name = tensor<string, []>("op_66_pad_type_0"), val = tensor<string, []>("custom")];
|
44 |
+
tensor<int32, [4]> var_66_pad_0 = const()[name = tensor<string, []>("op_66_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
45 |
+
tensor<fp16, [1, 4096, 1, 64]> var_66_cast_fp16 = conv(dilations = var_64, groups = var_28, pad = var_66_pad_0, pad_type = var_66_pad_type_0, strides = var_62, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_66_cast_fp16")];
|
46 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202394432)))];
|
47 |
+
tensor<fp16, [1, 4096, 1, 64]> k_1_cast_fp16 = mul(x = var_66_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_1_cast_fp16")];
|
48 |
+
tensor<int32, [2]> var_70 = const()[name = tensor<string, []>("op_70"), val = tensor<int32, [2]>([1, 1])];
|
49 |
+
tensor<int32, [2]> var_72 = const()[name = tensor<string, []>("op_72"), val = tensor<int32, [2]>([1, 1])];
|
50 |
+
tensor<string, []> var_74_pad_type_0 = const()[name = tensor<string, []>("op_74_pad_type_0"), val = tensor<string, []>("custom")];
|
51 |
+
tensor<int32, [4]> var_74_pad_0 = const()[name = tensor<string, []>("op_74_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
52 |
+
tensor<fp16, [1, 4096, 1, 64]> var_74_cast_fp16 = conv(dilations = var_72, groups = var_28, pad = var_74_pad_0, pad_type = var_74_pad_type_0, strides = var_70, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_74_cast_fp16")];
|
53 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202402688)))];
|
54 |
+
tensor<fp16, [1, 4096, 1, 64]> v_1_cast_fp16 = mul(x = var_74_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_1_cast_fp16")];
|
55 |
+
tensor<int32, [4]> var_76 = const()[name = tensor<string, []>("op_76"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
56 |
+
tensor<fp16, [1, 32, 128, 64]> q_3_cast_fp16 = reshape(shape = var_76, x = q_1_cast_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
|
57 |
+
tensor<int32, [4]> var_78 = const()[name = tensor<string, []>("op_78"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
58 |
+
tensor<fp16, [1, 32, 128, 64]> k_3_cast_fp16 = reshape(shape = var_78, x = k_1_cast_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
|
59 |
+
tensor<int32, [4]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
60 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_0 = reshape(shape = var_80, x = v_1_cast_fp16)[name = tensor<string, []>("v_3_cast_fp16")];
|
61 |
+
tensor<int32, [4]> var_92_begin_0 = const()[name = tensor<string, []>("op_92_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
62 |
+
tensor<int32, [4]> var_92_end_0 = const()[name = tensor<string, []>("op_92_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
63 |
+
tensor<bool, [4]> var_92_end_mask_0 = const()[name = tensor<string, []>("op_92_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
64 |
+
tensor<fp16, [1, 32, 64, 64]> var_92_cast_fp16 = slice_by_index(begin = var_92_begin_0, end = var_92_end_0, end_mask = var_92_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_92_cast_fp16")];
|
65 |
+
tensor<int32, [4]> var_98_begin_0 = const()[name = tensor<string, []>("op_98_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
66 |
+
tensor<int32, [4]> var_98_end_0 = const()[name = tensor<string, []>("op_98_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
67 |
+
tensor<bool, [4]> var_98_end_mask_0 = const()[name = tensor<string, []>("op_98_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
68 |
+
tensor<fp16, [1, 32, 64, 64]> var_98_cast_fp16 = slice_by_index(begin = var_98_begin_0, end = var_98_end_0, end_mask = var_98_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_98_cast_fp16")];
|
69 |
+
tensor<fp16, []> const_3_promoted_to_fp16 = const()[name = tensor<string, []>("const_3_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
70 |
+
tensor<fp16, [1, 32, 64, 64]> var_100_cast_fp16 = mul(x = var_98_cast_fp16, y = const_3_promoted_to_fp16)[name = tensor<string, []>("op_100_cast_fp16")];
|
71 |
+
tensor<bool, []> rotated_1_interleave_0 = const()[name = tensor<string, []>("rotated_1_interleave_0"), val = tensor<bool, []>(false)];
|
72 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_1_cast_fp16 = concat(axis = var_19, interleave = rotated_1_interleave_0, values = (var_100_cast_fp16, var_92_cast_fp16))[name = tensor<string, []>("rotated_1_cast_fp16")];
|
73 |
+
tensor<fp16, [1, 32, 128, 64]> var_103_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = tensor<string, []>("op_103_cast_fp16")];
|
74 |
+
tensor<fp16, [1, 32, 128, 64]> var_104_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = tensor<string, []>("op_104_cast_fp16")];
|
75 |
+
tensor<fp16, [1, 32, 128, 64]> roped_1_cast_fp16 = add(x = var_103_cast_fp16, y = var_104_cast_fp16)[name = tensor<string, []>("roped_1_cast_fp16")];
|
76 |
+
tensor<int32, [4]> var_117_begin_0 = const()[name = tensor<string, []>("op_117_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
77 |
+
tensor<int32, [4]> var_117_end_0 = const()[name = tensor<string, []>("op_117_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
78 |
+
tensor<bool, [4]> var_117_end_mask_0 = const()[name = tensor<string, []>("op_117_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
79 |
+
tensor<fp16, [1, 32, 64, 64]> var_117_cast_fp16 = slice_by_index(begin = var_117_begin_0, end = var_117_end_0, end_mask = var_117_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_117_cast_fp16")];
|
80 |
+
tensor<int32, [4]> var_123_begin_0 = const()[name = tensor<string, []>("op_123_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
81 |
+
tensor<int32, [4]> var_123_end_0 = const()[name = tensor<string, []>("op_123_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
82 |
+
tensor<bool, [4]> var_123_end_mask_0 = const()[name = tensor<string, []>("op_123_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
83 |
+
tensor<fp16, [1, 32, 64, 64]> var_123_cast_fp16 = slice_by_index(begin = var_123_begin_0, end = var_123_end_0, end_mask = var_123_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_123_cast_fp16")];
|
84 |
+
tensor<fp16, []> const_5_promoted_to_fp16 = const()[name = tensor<string, []>("const_5_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
85 |
+
tensor<fp16, [1, 32, 64, 64]> var_125_cast_fp16 = mul(x = var_123_cast_fp16, y = const_5_promoted_to_fp16)[name = tensor<string, []>("op_125_cast_fp16")];
|
86 |
+
tensor<bool, []> rotated_3_interleave_0 = const()[name = tensor<string, []>("rotated_3_interleave_0"), val = tensor<bool, []>(false)];
|
87 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_3_cast_fp16 = concat(axis = var_19, interleave = rotated_3_interleave_0, values = (var_125_cast_fp16, var_117_cast_fp16))[name = tensor<string, []>("rotated_3_cast_fp16")];
|
88 |
+
tensor<fp16, [1, 32, 128, 64]> var_128_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = tensor<string, []>("op_128_cast_fp16")];
|
89 |
+
tensor<fp16, [1, 32, 128, 64]> var_129_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = tensor<string, []>("op_129_cast_fp16")];
|
90 |
+
tensor<fp16, [1, 32, 128, 64]> roped_3_cast_fp16 = add(x = var_128_cast_fp16, y = var_129_cast_fp16)[name = tensor<string, []>("roped_3_cast_fp16")];
|
91 |
+
tensor<bool, []> q_5_interleave_0 = const()[name = tensor<string, []>("q_5_interleave_0"), val = tensor<bool, []>(false)];
|
92 |
+
tensor<fp16, [1, 32, 128, 64]> q_5_cast_fp16 = concat(axis = var_19, interleave = q_5_interleave_0, values = roped_1_cast_fp16)[name = tensor<string, []>("q_5_cast_fp16")];
|
93 |
+
tensor<bool, []> k_5_interleave_0 = const()[name = tensor<string, []>("k_5_interleave_0"), val = tensor<bool, []>(false)];
|
94 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_0 = concat(axis = var_19, interleave = k_5_interleave_0, values = roped_3_cast_fp16)[name = tensor<string, []>("k_5_cast_fp16")];
|
95 |
+
tensor<bool, []> k_7_interleave_0 = const()[name = tensor<string, []>("k_7_interleave_0"), val = tensor<bool, []>(false)];
|
96 |
+
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_21, interleave = k_7_interleave_0, values = (k_cache_0, new_k_cache_0))[name = tensor<string, []>("k_7_cast_fp16")];
|
97 |
+
tensor<bool, []> v_5_interleave_0 = const()[name = tensor<string, []>("v_5_interleave_0"), val = tensor<bool, []>(false)];
|
98 |
+
tensor<fp16, [1, 32, 128, 512]> v_5_cast_fp16 = concat(axis = var_21, interleave = v_5_interleave_0, values = (v_cache_0, new_v_cache_0))[name = tensor<string, []>("v_5_cast_fp16")];
|
99 |
+
tensor<fp16, []> var_151_to_fp16 = const()[name = tensor<string, []>("op_151_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
100 |
+
tensor<fp16, [1, 32, 128, 64]> var_152_cast_fp16 = mul(x = q_5_cast_fp16, y = var_151_to_fp16)[name = tensor<string, []>("op_152_cast_fp16")];
|
101 |
+
tensor<bool, []> attn_weights_1_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_0"), val = tensor<bool, []>(true)];
|
102 |
+
tensor<bool, []> attn_weights_1_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_0"), val = tensor<bool, []>(false)];
|
103 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_152_cast_fp16, y = k_7_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
|
104 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
|
105 |
+
tensor<fp16, [1, 32, 64, 512]> var_160_cast_fp16 = softmax(axis = var_14, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("op_160_cast_fp16")];
|
106 |
+
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
|
107 |
+
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
|
108 |
+
tensor<fp16, [1, 32, 128, 64]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = v_5_cast_fp16, y = var_160_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
|
109 |
+
tensor<int32, [4]> var_164 = const()[name = tensor<string, []>("op_164"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
110 |
+
tensor<fp16, [1, 4096, 1, 64]> input_1_cast_fp16 = reshape(shape = var_164, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
|
111 |
+
tensor<int32, [2]> var_168 = const()[name = tensor<string, []>("op_168"), val = tensor<int32, [2]>([1, 1])];
|
112 |
+
tensor<int32, [2]> var_170 = const()[name = tensor<string, []>("op_170"), val = tensor<int32, [2]>([1, 1])];
|
113 |
+
tensor<string, []> var_172_pad_type_0 = const()[name = tensor<string, []>("op_172_pad_type_0"), val = tensor<string, []>("custom")];
|
114 |
+
tensor<int32, [4]> var_172_pad_0 = const()[name = tensor<string, []>("op_172_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
115 |
+
tensor<fp16, [1, 4096, 1, 64]> var_172_cast_fp16 = conv(dilations = var_170, groups = var_28, pad = var_172_pad_0, pad_type = var_172_pad_type_0, strides = var_168, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_172_cast_fp16")];
|
116 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202410944)))];
|
117 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_1_cast_fp16 = mul(x = var_172_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_1_cast_fp16")];
|
118 |
+
tensor<fp16, [1, 4096, 1, 64]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = tensor<string, []>("x_11_cast_fp16")];
|
119 |
+
tensor<fp16, [1, 4096, 1, 64]> var_181_cast_fp16 = mul(x = x_11_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("op_181_cast_fp16")];
|
120 |
+
tensor<int32, [1]> var_182 = const()[name = tensor<string, []>("op_182"), val = tensor<int32, [1]>([1])];
|
121 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_3_cast_fp16 = reduce_mean(axes = var_182, keep_dims = var_29, x = var_181_cast_fp16)[name = tensor<string, []>("norm_x_3_cast_fp16")];
|
122 |
+
tensor<fp16, []> var_184_to_fp16 = const()[name = tensor<string, []>("op_184_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
123 |
+
tensor<fp16, [1, 1, 1, 64]> var_185_cast_fp16 = add(x = norm_x_3_cast_fp16, y = var_184_to_fp16)[name = tensor<string, []>("op_185_cast_fp16")];
|
124 |
+
tensor<fp16, []> var_186_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_186_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
125 |
+
tensor<fp16, [1, 1, 1, 64]> var_186_cast_fp16 = rsqrt(epsilon = var_186_epsilon_0_to_fp16, x = var_185_cast_fp16)[name = tensor<string, []>("op_186_cast_fp16")];
|
126 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_5_cast_fp16 = mul(x = x_11_cast_fp16, y = var_186_cast_fp16)[name = tensor<string, []>("x_normed_5_cast_fp16")];
|
127 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202419200)))];
|
128 |
+
tensor<fp16, [1, 4096, 1, 64]> input_3_cast_fp16 = mul(x = x_normed_5_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
129 |
+
tensor<int32, [2]> var_198 = const()[name = tensor<string, []>("op_198"), val = tensor<int32, [2]>([1, 1])];
|
130 |
+
tensor<int32, [2]> var_200 = const()[name = tensor<string, []>("op_200"), val = tensor<int32, [2]>([1, 1])];
|
131 |
+
tensor<string, []> var_202_pad_type_0 = const()[name = tensor<string, []>("op_202_pad_type_0"), val = tensor<string, []>("custom")];
|
132 |
+
tensor<int32, [4]> var_202_pad_0 = const()[name = tensor<string, []>("op_202_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
133 |
+
tensor<fp16, [1, 11008, 1, 64]> var_202_cast_fp16 = conv(dilations = var_200, groups = var_28, pad = var_202_pad_0, pad_type = var_202_pad_type_0, strides = var_198, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_202_cast_fp16")];
|
134 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202427456)))];
|
135 |
+
tensor<fp16, [1, 11008, 1, 64]> input_5_cast_fp16 = mul(x = var_202_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
136 |
+
tensor<int32, [2]> var_206 = const()[name = tensor<string, []>("op_206"), val = tensor<int32, [2]>([1, 1])];
|
137 |
+
tensor<int32, [2]> var_208 = const()[name = tensor<string, []>("op_208"), val = tensor<int32, [2]>([1, 1])];
|
138 |
+
tensor<string, []> var_210_pad_type_0 = const()[name = tensor<string, []>("op_210_pad_type_0"), val = tensor<string, []>("custom")];
|
139 |
+
tensor<int32, [4]> var_210_pad_0 = const()[name = tensor<string, []>("op_210_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
140 |
+
tensor<fp16, [1, 11008, 1, 64]> var_210_cast_fp16 = conv(dilations = var_208, groups = var_28, pad = var_210_pad_0, pad_type = var_210_pad_type_0, strides = var_206, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_210_cast_fp16")];
|
141 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202449536)))];
|
142 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_1_cast_fp16 = mul(x = var_210_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_1_cast_fp16")];
|
143 |
+
tensor<fp16, [1, 11008, 1, 64]> var_212_cast_fp16 = silu(x = input_5_cast_fp16)[name = tensor<string, []>("op_212_cast_fp16")];
|
144 |
+
tensor<fp16, [1, 11008, 1, 64]> input_7_cast_fp16 = mul(x = var_212_cast_fp16, y = x_fc_2_1_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
145 |
+
tensor<int32, [2]> var_216 = const()[name = tensor<string, []>("op_216"), val = tensor<int32, [2]>([1, 1])];
|
146 |
+
tensor<int32, [2]> var_218 = const()[name = tensor<string, []>("op_218"), val = tensor<int32, [2]>([1, 1])];
|
147 |
+
tensor<string, []> var_220_pad_type_0 = const()[name = tensor<string, []>("op_220_pad_type_0"), val = tensor<string, []>("custom")];
|
148 |
+
tensor<int32, [4]> var_220_pad_0 = const()[name = tensor<string, []>("op_220_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
149 |
+
tensor<fp16, [1, 4096, 1, 64]> var_220_cast_fp16 = conv(dilations = var_218, groups = var_28, pad = var_220_pad_0, pad_type = var_220_pad_type_0, strides = var_216, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("op_220_cast_fp16")];
|
150 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202471616)))];
|
151 |
+
tensor<fp16, [1, 4096, 1, 64]> var_221_cast_fp16 = mul(x = var_220_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_221_cast_fp16")];
|
152 |
+
tensor<fp16, [1, 4096, 1, 64]> x_15_cast_fp16 = add(x = var_221_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("x_15_cast_fp16")];
|
153 |
+
tensor<int32, []> var_228 = const()[name = tensor<string, []>("op_228"), val = tensor<int32, []>(3)];
|
154 |
+
tensor<int32, []> var_233 = const()[name = tensor<string, []>("op_233"), val = tensor<int32, []>(-2)];
|
155 |
+
tensor<int32, []> var_235 = const()[name = tensor<string, []>("op_235"), val = tensor<int32, []>(-1)];
|
156 |
+
tensor<int32, []> var_242 = const()[name = tensor<string, []>("op_242"), val = tensor<int32, []>(1)];
|
157 |
+
tensor<bool, []> var_243 = const()[name = tensor<string, []>("op_243"), val = tensor<bool, []>(true)];
|
158 |
+
tensor<fp16, [1, 4096, 1, 64]> var_250_cast_fp16 = mul(x = x_15_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("op_250_cast_fp16")];
|
159 |
+
tensor<int32, [1]> var_251 = const()[name = tensor<string, []>("op_251"), val = tensor<int32, [1]>([1])];
|
160 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_5_cast_fp16 = reduce_mean(axes = var_251, keep_dims = var_243, x = var_250_cast_fp16)[name = tensor<string, []>("norm_x_5_cast_fp16")];
|
161 |
+
tensor<fp16, []> var_253_to_fp16 = const()[name = tensor<string, []>("op_253_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
162 |
+
tensor<fp16, [1, 1, 1, 64]> var_254_cast_fp16 = add(x = norm_x_5_cast_fp16, y = var_253_to_fp16)[name = tensor<string, []>("op_254_cast_fp16")];
|
163 |
+
tensor<fp16, []> var_255_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_255_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
164 |
+
tensor<fp16, [1, 1, 1, 64]> var_255_cast_fp16 = rsqrt(epsilon = var_255_epsilon_0_to_fp16, x = var_254_cast_fp16)[name = tensor<string, []>("op_255_cast_fp16")];
|
165 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_9_cast_fp16 = mul(x = x_15_cast_fp16, y = var_255_cast_fp16)[name = tensor<string, []>("x_normed_9_cast_fp16")];
|
166 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202479872)))];
|
167 |
+
tensor<fp16, [1, 4096, 1, 64]> x_19_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
|
168 |
+
tensor<int32, [2]> var_270 = const()[name = tensor<string, []>("op_270"), val = tensor<int32, [2]>([1, 1])];
|
169 |
+
tensor<int32, [2]> var_272 = const()[name = tensor<string, []>("op_272"), val = tensor<int32, [2]>([1, 1])];
|
170 |
+
tensor<string, []> var_274_pad_type_0 = const()[name = tensor<string, []>("op_274_pad_type_0"), val = tensor<string, []>("custom")];
|
171 |
+
tensor<int32, [4]> var_274_pad_0 = const()[name = tensor<string, []>("op_274_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
172 |
+
tensor<fp16, [1, 4096, 1, 64]> var_274_cast_fp16 = conv(dilations = var_272, groups = var_242, pad = var_274_pad_0, pad_type = var_274_pad_type_0, strides = var_270, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_274_cast_fp16")];
|
173 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202488128)))];
|
174 |
+
tensor<fp16, [1, 4096, 1, 64]> q_7_cast_fp16 = mul(x = var_274_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
|
175 |
+
tensor<int32, [2]> var_278 = const()[name = tensor<string, []>("op_278"), val = tensor<int32, [2]>([1, 1])];
|
176 |
+
tensor<int32, [2]> var_280 = const()[name = tensor<string, []>("op_280"), val = tensor<int32, [2]>([1, 1])];
|
177 |
+
tensor<string, []> var_282_pad_type_0 = const()[name = tensor<string, []>("op_282_pad_type_0"), val = tensor<string, []>("custom")];
|
178 |
+
tensor<int32, [4]> var_282_pad_0 = const()[name = tensor<string, []>("op_282_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
179 |
+
tensor<fp16, [1, 4096, 1, 64]> var_282_cast_fp16 = conv(dilations = var_280, groups = var_242, pad = var_282_pad_0, pad_type = var_282_pad_type_0, strides = var_278, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_282_cast_fp16")];
|
180 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202496384)))];
|
181 |
+
tensor<fp16, [1, 4096, 1, 64]> k_9_cast_fp16 = mul(x = var_282_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_9_cast_fp16")];
|
182 |
+
tensor<int32, [2]> var_286 = const()[name = tensor<string, []>("op_286"), val = tensor<int32, [2]>([1, 1])];
|
183 |
+
tensor<int32, [2]> var_288 = const()[name = tensor<string, []>("op_288"), val = tensor<int32, [2]>([1, 1])];
|
184 |
+
tensor<string, []> var_290_pad_type_0 = const()[name = tensor<string, []>("op_290_pad_type_0"), val = tensor<string, []>("custom")];
|
185 |
+
tensor<int32, [4]> var_290_pad_0 = const()[name = tensor<string, []>("op_290_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
186 |
+
tensor<fp16, [1, 4096, 1, 64]> var_290_cast_fp16 = conv(dilations = var_288, groups = var_242, pad = var_290_pad_0, pad_type = var_290_pad_type_0, strides = var_286, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_290_cast_fp16")];
|
187 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202504640)))];
|
188 |
+
tensor<fp16, [1, 4096, 1, 64]> v_7_cast_fp16 = mul(x = var_290_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_7_cast_fp16")];
|
189 |
+
tensor<int32, [4]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
190 |
+
tensor<fp16, [1, 32, 128, 64]> q_9_cast_fp16 = reshape(shape = var_292, x = q_7_cast_fp16)[name = tensor<string, []>("q_9_cast_fp16")];
|
191 |
+
tensor<int32, [4]> var_294 = const()[name = tensor<string, []>("op_294"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
192 |
+
tensor<fp16, [1, 32, 128, 64]> k_11_cast_fp16 = reshape(shape = var_294, x = k_9_cast_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
|
193 |
+
tensor<int32, [4]> var_296 = const()[name = tensor<string, []>("op_296"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
194 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_1 = reshape(shape = var_296, x = v_7_cast_fp16)[name = tensor<string, []>("v_9_cast_fp16")];
|
195 |
+
tensor<int32, [4]> var_308_begin_0 = const()[name = tensor<string, []>("op_308_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
196 |
+
tensor<int32, [4]> var_308_end_0 = const()[name = tensor<string, []>("op_308_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
197 |
+
tensor<bool, [4]> var_308_end_mask_0 = const()[name = tensor<string, []>("op_308_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
198 |
+
tensor<fp16, [1, 32, 64, 64]> var_308_cast_fp16 = slice_by_index(begin = var_308_begin_0, end = var_308_end_0, end_mask = var_308_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_308_cast_fp16")];
|
199 |
+
tensor<int32, [4]> var_314_begin_0 = const()[name = tensor<string, []>("op_314_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
200 |
+
tensor<int32, [4]> var_314_end_0 = const()[name = tensor<string, []>("op_314_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
201 |
+
tensor<bool, [4]> var_314_end_mask_0 = const()[name = tensor<string, []>("op_314_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
202 |
+
tensor<fp16, [1, 32, 64, 64]> var_314_cast_fp16 = slice_by_index(begin = var_314_begin_0, end = var_314_end_0, end_mask = var_314_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_314_cast_fp16")];
|
203 |
+
tensor<fp16, []> const_10_promoted_to_fp16 = const()[name = tensor<string, []>("const_10_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
204 |
+
tensor<fp16, [1, 32, 64, 64]> var_316_cast_fp16 = mul(x = var_314_cast_fp16, y = const_10_promoted_to_fp16)[name = tensor<string, []>("op_316_cast_fp16")];
|
205 |
+
tensor<bool, []> rotated_5_interleave_0 = const()[name = tensor<string, []>("rotated_5_interleave_0"), val = tensor<bool, []>(false)];
|
206 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_5_cast_fp16 = concat(axis = var_233, interleave = rotated_5_interleave_0, values = (var_316_cast_fp16, var_308_cast_fp16))[name = tensor<string, []>("rotated_5_cast_fp16")];
|
207 |
+
tensor<fp16, [1, 32, 128, 64]> var_319_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = tensor<string, []>("op_319_cast_fp16")];
|
208 |
+
tensor<fp16, [1, 32, 128, 64]> var_320_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = tensor<string, []>("op_320_cast_fp16")];
|
209 |
+
tensor<fp16, [1, 32, 128, 64]> roped_5_cast_fp16 = add(x = var_319_cast_fp16, y = var_320_cast_fp16)[name = tensor<string, []>("roped_5_cast_fp16")];
|
210 |
+
tensor<int32, [4]> var_333_begin_0 = const()[name = tensor<string, []>("op_333_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
211 |
+
tensor<int32, [4]> var_333_end_0 = const()[name = tensor<string, []>("op_333_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
212 |
+
tensor<bool, [4]> var_333_end_mask_0 = const()[name = tensor<string, []>("op_333_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
213 |
+
tensor<fp16, [1, 32, 64, 64]> var_333_cast_fp16 = slice_by_index(begin = var_333_begin_0, end = var_333_end_0, end_mask = var_333_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_333_cast_fp16")];
|
214 |
+
tensor<int32, [4]> var_339_begin_0 = const()[name = tensor<string, []>("op_339_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
215 |
+
tensor<int32, [4]> var_339_end_0 = const()[name = tensor<string, []>("op_339_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
216 |
+
tensor<bool, [4]> var_339_end_mask_0 = const()[name = tensor<string, []>("op_339_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
217 |
+
tensor<fp16, [1, 32, 64, 64]> var_339_cast_fp16 = slice_by_index(begin = var_339_begin_0, end = var_339_end_0, end_mask = var_339_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_339_cast_fp16")];
|
218 |
+
tensor<fp16, []> const_12_promoted_to_fp16 = const()[name = tensor<string, []>("const_12_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
219 |
+
tensor<fp16, [1, 32, 64, 64]> var_341_cast_fp16 = mul(x = var_339_cast_fp16, y = const_12_promoted_to_fp16)[name = tensor<string, []>("op_341_cast_fp16")];
|
220 |
+
tensor<bool, []> rotated_interleave_0 = const()[name = tensor<string, []>("rotated_interleave_0"), val = tensor<bool, []>(false)];
|
221 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_cast_fp16 = concat(axis = var_233, interleave = rotated_interleave_0, values = (var_341_cast_fp16, var_333_cast_fp16))[name = tensor<string, []>("rotated_cast_fp16")];
|
222 |
+
tensor<fp16, [1, 32, 128, 64]> var_344_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = tensor<string, []>("op_344_cast_fp16")];
|
223 |
+
tensor<fp16, [1, 32, 128, 64]> var_345_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = tensor<string, []>("op_345_cast_fp16")];
|
224 |
+
tensor<fp16, [1, 32, 128, 64]> roped_cast_fp16 = add(x = var_344_cast_fp16, y = var_345_cast_fp16)[name = tensor<string, []>("roped_cast_fp16")];
|
225 |
+
tensor<bool, []> q_interleave_0 = const()[name = tensor<string, []>("q_interleave_0"), val = tensor<bool, []>(false)];
|
226 |
+
tensor<fp16, [1, 32, 128, 64]> q_cast_fp16 = concat(axis = var_233, interleave = q_interleave_0, values = roped_5_cast_fp16)[name = tensor<string, []>("q_cast_fp16")];
|
227 |
+
tensor<bool, []> k_13_interleave_0 = const()[name = tensor<string, []>("k_13_interleave_0"), val = tensor<bool, []>(false)];
|
228 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_1 = concat(axis = var_233, interleave = k_13_interleave_0, values = roped_cast_fp16)[name = tensor<string, []>("k_13_cast_fp16")];
|
229 |
+
tensor<bool, []> k_interleave_0 = const()[name = tensor<string, []>("k_interleave_0"), val = tensor<bool, []>(false)];
|
230 |
+
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_235, interleave = k_interleave_0, values = (k_cache_1, new_k_cache_1))[name = tensor<string, []>("k_cast_fp16")];
|
231 |
+
tensor<bool, []> v_interleave_0 = const()[name = tensor<string, []>("v_interleave_0"), val = tensor<bool, []>(false)];
|
232 |
+
tensor<fp16, [1, 32, 128, 512]> v_cast_fp16 = concat(axis = var_235, interleave = v_interleave_0, values = (v_cache_1, new_v_cache_1))[name = tensor<string, []>("v_cast_fp16")];
|
233 |
+
tensor<fp16, []> var_367_to_fp16 = const()[name = tensor<string, []>("op_367_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
234 |
+
tensor<fp16, [1, 32, 128, 64]> var_368_cast_fp16 = mul(x = q_cast_fp16, y = var_367_to_fp16)[name = tensor<string, []>("op_368_cast_fp16")];
|
235 |
+
tensor<bool, []> attn_weights_5_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_x_0"), val = tensor<bool, []>(true)];
|
236 |
+
tensor<bool, []> attn_weights_5_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_y_0"), val = tensor<bool, []>(false)];
|
237 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_5_cast_fp16 = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_368_cast_fp16, y = k_cast_fp16)[name = tensor<string, []>("attn_weights_5_cast_fp16")];
|
238 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_cast_fp16 = add(x = attn_weights_5_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_cast_fp16")];
|
239 |
+
tensor<fp16, [1, 32, 64, 512]> var_376_cast_fp16 = softmax(axis = var_228, x = attn_weights_cast_fp16)[name = tensor<string, []>("op_376_cast_fp16")];
|
240 |
+
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
|
241 |
+
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
|
242 |
+
tensor<fp16, [1, 32, 128, 64]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = v_cast_fp16, y = var_376_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
|
243 |
+
tensor<int32, [4]> var_380 = const()[name = tensor<string, []>("op_380"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
244 |
+
tensor<fp16, [1, 4096, 1, 64]> input_9_cast_fp16 = reshape(shape = var_380, x = attn_3_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
|
245 |
+
tensor<int32, [2]> var_384 = const()[name = tensor<string, []>("op_384"), val = tensor<int32, [2]>([1, 1])];
|
246 |
+
tensor<int32, [2]> var_386 = const()[name = tensor<string, []>("op_386"), val = tensor<int32, [2]>([1, 1])];
|
247 |
+
tensor<string, []> var_388_pad_type_0 = const()[name = tensor<string, []>("op_388_pad_type_0"), val = tensor<string, []>("custom")];
|
248 |
+
tensor<int32, [4]> var_388_pad_0 = const()[name = tensor<string, []>("op_388_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
249 |
+
tensor<fp16, [1, 4096, 1, 64]> var_388_cast_fp16 = conv(dilations = var_386, groups = var_242, pad = var_388_pad_0, pad_type = var_388_pad_type_0, strides = var_384, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("op_388_cast_fp16")];
|
250 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202512896)))];
|
251 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_cast_fp16 = mul(x = var_388_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_cast_fp16")];
|
252 |
+
tensor<fp16, [1, 4096, 1, 64]> x_25_cast_fp16 = add(x = attention_output_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
|
253 |
+
tensor<fp16, [1, 4096, 1, 64]> var_397_cast_fp16 = mul(x = x_25_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_397_cast_fp16")];
|
254 |
+
tensor<int32, [1]> var_398 = const()[name = tensor<string, []>("op_398"), val = tensor<int32, [1]>([1])];
|
255 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_398, keep_dims = var_243, x = var_397_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
256 |
+
tensor<fp16, []> var_400_to_fp16 = const()[name = tensor<string, []>("op_400_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
257 |
+
tensor<fp16, [1, 1, 1, 64]> var_401_cast_fp16 = add(x = norm_x_cast_fp16, y = var_400_to_fp16)[name = tensor<string, []>("op_401_cast_fp16")];
|
258 |
+
tensor<fp16, []> var_402_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_402_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
259 |
+
tensor<fp16, [1, 1, 1, 64]> var_402_cast_fp16 = rsqrt(epsilon = var_402_epsilon_0_to_fp16, x = var_401_cast_fp16)[name = tensor<string, []>("op_402_cast_fp16")];
|
260 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_13_cast_fp16 = mul(x = x_25_cast_fp16, y = var_402_cast_fp16)[name = tensor<string, []>("x_normed_13_cast_fp16")];
|
261 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202521152)))];
|
262 |
+
tensor<fp16, [1, 4096, 1, 64]> input_11_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
263 |
+
tensor<int32, [2]> var_414 = const()[name = tensor<string, []>("op_414"), val = tensor<int32, [2]>([1, 1])];
|
264 |
+
tensor<int32, [2]> var_416 = const()[name = tensor<string, []>("op_416"), val = tensor<int32, [2]>([1, 1])];
|
265 |
+
tensor<string, []> var_418_pad_type_0 = const()[name = tensor<string, []>("op_418_pad_type_0"), val = tensor<string, []>("custom")];
|
266 |
+
tensor<int32, [4]> var_418_pad_0 = const()[name = tensor<string, []>("op_418_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
267 |
+
tensor<fp16, [1, 11008, 1, 64]> var_418_cast_fp16 = conv(dilations = var_416, groups = var_242, pad = var_418_pad_0, pad_type = var_418_pad_type_0, strides = var_414, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_418_cast_fp16")];
|
268 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202529408)))];
|
269 |
+
tensor<fp16, [1, 11008, 1, 64]> input_13_cast_fp16 = mul(x = var_418_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
270 |
+
tensor<int32, [2]> var_422 = const()[name = tensor<string, []>("op_422"), val = tensor<int32, [2]>([1, 1])];
|
271 |
+
tensor<int32, [2]> var_424 = const()[name = tensor<string, []>("op_424"), val = tensor<int32, [2]>([1, 1])];
|
272 |
+
tensor<string, []> var_426_pad_type_0 = const()[name = tensor<string, []>("op_426_pad_type_0"), val = tensor<string, []>("custom")];
|
273 |
+
tensor<int32, [4]> var_426_pad_0 = const()[name = tensor<string, []>("op_426_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
274 |
+
tensor<fp16, [1, 11008, 1, 64]> var_426_cast_fp16 = conv(dilations = var_424, groups = var_242, pad = var_426_pad_0, pad_type = var_426_pad_type_0, strides = var_422, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_426_cast_fp16")];
|
275 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202551488)))];
|
276 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_cast_fp16 = mul(x = var_426_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_cast_fp16")];
|
277 |
+
tensor<fp16, [1, 11008, 1, 64]> var_428_cast_fp16 = silu(x = input_13_cast_fp16)[name = tensor<string, []>("op_428_cast_fp16")];
|
278 |
+
tensor<fp16, [1, 11008, 1, 64]> input_cast_fp16 = mul(x = var_428_cast_fp16, y = x_fc_2_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
279 |
+
tensor<int32, [2]> var_432 = const()[name = tensor<string, []>("op_432"), val = tensor<int32, [2]>([1, 1])];
|
280 |
+
tensor<int32, [2]> var_434 = const()[name = tensor<string, []>("op_434"), val = tensor<int32, [2]>([1, 1])];
|
281 |
+
tensor<string, []> var_436_pad_type_0 = const()[name = tensor<string, []>("op_436_pad_type_0"), val = tensor<string, []>("custom")];
|
282 |
+
tensor<int32, [4]> var_436_pad_0 = const()[name = tensor<string, []>("op_436_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
283 |
+
tensor<fp16, [1, 4096, 1, 64]> var_436_cast_fp16 = conv(dilations = var_434, groups = var_242, pad = var_436_pad_0, pad_type = var_436_pad_type_0, strides = var_432, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = tensor<string, []>("op_436_cast_fp16")];
|
284 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202573568)))];
|
285 |
+
tensor<fp16, [1, 4096, 1, 64]> var_437_cast_fp16 = mul(x = var_436_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_437_cast_fp16")];
|
286 |
+
tensor<fp16, [1, 4096, 1, 64]> new_x = add(x = var_437_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_438_cast_fp16")];
|
287 |
+
} -> (new_x, new_k_cache_0, new_k_cache_1, new_v_cache_0, new_v_cache_1);
|
288 |
+
}
|
Llama-2-7b-hf_chunk12.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3d085d837454b4685bcd36331b09a5b0b329f7ef4da1f2dbed101b7ec075630
+ size 202581824
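
The three lines above are a Git LFS pointer, not the weights themselves; the actual ~202 MB blob is materialized on checkout or `git lfs pull`. After downloading, the blob can be checked against the pointer's `oid`/`size` fields. A minimal stdlib-only sketch (the local path is an assumption about where the repo was cloned):

# Sketch: verify a fetched weights/weight.bin blob against its LFS pointer.
import hashlib
import os

BLOB_PATH = "Llama-2-7b-hf_chunk12.mlmodelc/weights/weight.bin"  # assumed local path
EXPECTED_OID = "d3d085d837454b4685bcd36331b09a5b0b329f7ef4da1f2dbed101b7ec075630"
EXPECTED_SIZE = 202581824

def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            h.update(block)
    return h.hexdigest()

assert os.path.getsize(BLOB_PATH) == EXPECTED_SIZE, "size mismatch (file may still be an LFS pointer)"
assert sha256_of(BLOB_PATH) == EXPECTED_OID, "sha256 mismatch"
print("weight.bin matches its LFS pointer")
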
Llama-2-7b-hf_chunk13.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55bbf17f4d2567d045baa3ae69337cad81c45f822491151ed7a5b29327f874f6
+ size 243
Llama-2-7b-hf_chunk13.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d860ea43d6f8ebbf70594a29be6231ee1d324bdaf2f26417eb82297acb920e17
+ size 309
Llama-2-7b-hf_chunk13.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,65 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Float16",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 64 × 32000)",
+         "shortDescription" : "",
+         "shape" : "[1, 64, 32000]",
+         "name" : "logits",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 7,
+     "mlProgramOperationTypeHistogram" : {
+       "Concat" : 1,
+       "Ios16.add" : 1,
+       "Ios16.mul" : 3,
+       "Ios16.rsqrt" : 1,
+       "Transpose" : 1,
+       "Ios16.reshape" : 3,
+       "Ios16.reduceMean" : 1,
+       "Ios16.matmul" : 2,
+       "Squeeze" : 1
+     },
+     "computePrecision" : "Mixed (Float16, Int32)",
+     "isUpdatable" : "0",
+     "availability" : {
+       "macOS" : "13.0",
+       "tvOS" : "16.0",
+       "visionOS" : "1.0",
+       "watchOS" : "9.0",
+       "iOS" : "16.0",
+       "macCatalyst" : "16.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.1.0",
+       "com.github.apple.coremltools.version" : "7.2"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
+         "shortDescription" : "",
+         "shape" : "[1, 4096, 1, 64]",
+         "name" : "x",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk13",
+     "method" : "predict"
+   }
+ ]
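
The schema above implies this final chunk maps the hidden states "x" of shape [1, 4096, 1, 64] to vocabulary "logits" of shape [1, 64, 32000]. A hedged smoke-test sketch with coremltools follows; the `CompiledMLModel` loader and compute-unit choice are assumptions about a coremltools 7.x environment, not code shipped in this repo:

# Sketch: load the compiled chunk13 head and run it on a dummy hidden-state tensor.
import coremltools as ct
import numpy as np

model = ct.models.CompiledMLModel(
    "Llama-2-7b-hf_chunk13.mlmodelc",            # assumed local path
    compute_units=ct.ComputeUnit.CPU_AND_NE,
)

x = np.zeros((1, 4096, 1, 64), dtype=np.float16)  # final hidden states for 64 token positions
out = model.predict({"x": x})
logits = out["logits"]                            # expected shape (1, 64, 32000)
print(logits.shape)
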
Llama-2-7b-hf_chunk13.mlmodelc/model.mil
ADDED
@@ -0,0 +1,38 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [1, 4096, 1, 64]> x) {
|
5 |
+
tensor<bool, []> var_6 = const()[name = tensor<string, []>("op_6"), val = tensor<bool, []>(true)];
|
6 |
+
tensor<fp16, [1, 4096, 1, 64]> var_13_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_13_cast_fp16")];
|
7 |
+
tensor<int32, [1]> var_14 = const()[name = tensor<string, []>("op_14"), val = tensor<int32, [1]>([1])];
|
8 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_14, keep_dims = var_6, x = var_13_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
9 |
+
tensor<fp16, []> var_16_to_fp16 = const()[name = tensor<string, []>("op_16_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
10 |
+
tensor<fp16, [1, 1, 1, 64]> var_17_cast_fp16 = add(x = norm_x_cast_fp16, y = var_16_to_fp16)[name = tensor<string, []>("op_17_cast_fp16")];
|
11 |
+
tensor<fp16, []> var_18_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_18_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
12 |
+
tensor<fp16, [1, 1, 1, 64]> var_18_cast_fp16 = rsqrt(epsilon = var_18_epsilon_0_to_fp16, x = var_17_cast_fp16)[name = tensor<string, []>("op_18_cast_fp16")];
|
13 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_18_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
|
14 |
+
tensor<fp16, [1, 4096, 1, 1]> ln_f_weight_to_fp16 = const()[name = tensor<string, []>("ln_f_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
15 |
+
tensor<fp16, [1, 4096, 1, 64]> x_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = ln_f_weight_to_fp16)[name = tensor<string, []>("x_cast_fp16")];
|
16 |
+
tensor<int32, [1]> var_23_axes_0 = const()[name = tensor<string, []>("op_23_axes_0"), val = tensor<int32, [1]>([2])];
|
17 |
+
tensor<fp16, [1, 4096, 64]> var_23_cast_fp16 = squeeze(axes = var_23_axes_0, x = x_cast_fp16)[name = tensor<string, []>("op_23_cast_fp16")];
|
18 |
+
tensor<int32, [3]> var_26_perm_0 = const()[name = tensor<string, []>("op_26_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
|
19 |
+
tensor<int32, [2]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<int32, [2]>([64, 4096])];
|
20 |
+
tensor<fp16, [1, 64, 4096]> transpose_4 = transpose(perm = var_26_perm_0, x = var_23_cast_fp16)[name = tensor<string, []>("transpose_4")];
|
21 |
+
tensor<fp16, [64, 4096]> reshape_0_cast_fp16 = reshape(shape = concat_4, x = transpose_4)[name = tensor<string, []>("reshape_0_cast_fp16")];
|
22 |
+
tensor<bool, []> matmul_0_transpose_x_0 = const()[name = tensor<string, []>("matmul_0_transpose_x_0"), val = tensor<bool, []>(false)];
|
23 |
+
tensor<bool, []> matmul_0_transpose_y_0 = const()[name = tensor<string, []>("matmul_0_transpose_y_0"), val = tensor<bool, []>(false)];
|
24 |
+
tensor<fp16, [4096, 16384]> transpose_1_to_fp16 = const()[name = tensor<string, []>("transpose_1_to_fp16"), val = tensor<fp16, [4096, 16384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8320)))];
|
25 |
+
tensor<fp16, [64, 16384]> matmul_0_cast_fp16 = matmul(transpose_x = matmul_0_transpose_x_0, transpose_y = matmul_0_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_1_to_fp16)[name = tensor<string, []>("matmul_0_cast_fp16")];
|
26 |
+
tensor<int32, [3]> concat_8 = const()[name = tensor<string, []>("concat_8"), val = tensor<int32, [3]>([1, 64, 16384])];
|
27 |
+
tensor<fp16, [1, 64, 16384]> reshape_2_cast_fp16 = reshape(shape = concat_8, x = matmul_0_cast_fp16)[name = tensor<string, []>("reshape_2_cast_fp16")];
|
28 |
+
tensor<bool, []> matmul_1_transpose_x_0 = const()[name = tensor<string, []>("matmul_1_transpose_x_0"), val = tensor<bool, []>(false)];
|
29 |
+
tensor<bool, []> matmul_1_transpose_y_0 = const()[name = tensor<string, []>("matmul_1_transpose_y_0"), val = tensor<bool, []>(false)];
|
30 |
+
tensor<fp16, [4096, 15616]> transpose_3_to_fp16 = const()[name = tensor<string, []>("transpose_3_to_fp16"), val = tensor<fp16, [4096, 15616]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134226112)))];
|
31 |
+
tensor<fp16, [64, 15616]> matmul_1_cast_fp16 = matmul(transpose_x = matmul_1_transpose_x_0, transpose_y = matmul_1_transpose_y_0, x = reshape_0_cast_fp16, y = transpose_3_to_fp16)[name = tensor<string, []>("matmul_1_cast_fp16")];
|
32 |
+
tensor<int32, [3]> concat_16 = const()[name = tensor<string, []>("concat_16"), val = tensor<int32, [3]>([1, 64, 15616])];
|
33 |
+
tensor<fp16, [1, 64, 15616]> reshape_5_cast_fp16 = reshape(shape = concat_16, x = matmul_1_cast_fp16)[name = tensor<string, []>("reshape_5_cast_fp16")];
|
34 |
+
tensor<int32, []> var_41 = const()[name = tensor<string, []>("op_41"), val = tensor<int32, []>(-1)];
|
35 |
+
tensor<bool, []> var_42_interleave_0 = const()[name = tensor<string, []>("op_42_interleave_0"), val = tensor<bool, []>(false)];
|
36 |
+
tensor<fp16, [1, 64, 32000]> logits = concat(axis = var_41, interleave = var_42_interleave_0, values = (reshape_2_cast_fp16, reshape_5_cast_fp16))[name = tensor<string, []>("op_42_cast_fp16")];
|
37 |
+
} -> (logits);
|
38 |
+
}
|
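
The program above is just a final RMSNorm followed by a vocabulary projection split into two matmuls (16384 + 15616 = 32000 columns) whose results are concatenated. A rough NumPy restatement of that arithmetic, with random stand-in weights purely for illustration (the real palettized weights live at the BLOBFILE offsets in weight.bin):

# Rough restatement of the chunk13 MIL program: RMSNorm + split lm_head.
import numpy as np

def chunk13_head(x, ln_f_w, w_a, w_b, eps=1e-5):
    # x: (1, 4096, 1, 64); ln_f_w: (1, 4096, 1, 1); w_a: (4096, 16384); w_b: (4096, 15616)
    norm = np.mean(x * x, axis=1, keepdims=True) + eps        # reduce_mean over channel dim
    x_n = x / np.sqrt(norm) * ln_f_w                          # rsqrt + ln_f weight (RMSNorm)
    h = x_n.squeeze(2).transpose(0, 2, 1).reshape(64, 4096)   # squeeze/transpose/reshape -> (64, 4096)
    logits = np.concatenate([h @ w_a, h @ w_b], axis=-1)      # two matmuls, concat -> (64, 32000)
    return logits.reshape(1, 64, 32000)

x = np.random.randn(1, 4096, 1, 64).astype(np.float32)
ln_f_w = np.ones((1, 4096, 1, 1), dtype=np.float32)
w_a = np.random.randn(4096, 16384).astype(np.float32) * 0.01
w_b = np.random.randn(4096, 15616).astype(np.float32) * 0.01
print(chunk13_head(x, ln_f_w, w_a, w_b).shape)  # (1, 64, 32000)
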
Llama-2-7b-hf_chunk13.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23cc0c8382a52638c94e9c9963873d35d3222e897233b39b03f4cc92deae2edb
+ size 262152448
Llama-2-7b-hf_chunk2.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3412284b024b899a736cd77112d4b1a4a5faa19d954259e925ef429f58bd886b
+ size 243
Llama-2-7b-hf_chunk2.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:589729b2995d8ca8246bbb5d92b910207bab816ad67282b0a285bcd2de77f80e
+ size 791
Llama-2-7b-hf_chunk2.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,218 @@
1 |
+
[
|
2 |
+
{
|
3 |
+
"metadataOutputVersion" : "3.0",
|
4 |
+
"storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
|
5 |
+
"outputSchema" : [
|
6 |
+
{
|
7 |
+
"hasShapeFlexibility" : "0",
|
8 |
+
"isOptional" : "0",
|
9 |
+
"dataType" : "Float16",
|
10 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
11 |
+
"shortDescription" : "",
|
12 |
+
"shape" : "[1, 4096, 1, 64]",
|
13 |
+
"name" : "new_x",
|
14 |
+
"type" : "MultiArray"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"hasShapeFlexibility" : "0",
|
18 |
+
"isOptional" : "0",
|
19 |
+
"dataType" : "Float16",
|
20 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
21 |
+
"shortDescription" : "",
|
22 |
+
"shape" : "[1, 32, 128, 64]",
|
23 |
+
"name" : "new_k_cache_0",
|
24 |
+
"type" : "MultiArray"
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"hasShapeFlexibility" : "0",
|
28 |
+
"isOptional" : "0",
|
29 |
+
"dataType" : "Float16",
|
30 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
31 |
+
"shortDescription" : "",
|
32 |
+
"shape" : "[1, 32, 128, 64]",
|
33 |
+
"name" : "new_k_cache_1",
|
34 |
+
"type" : "MultiArray"
|
35 |
+
},
|
36 |
+
{
|
37 |
+
"hasShapeFlexibility" : "0",
|
38 |
+
"isOptional" : "0",
|
39 |
+
"dataType" : "Float16",
|
40 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
41 |
+
"shortDescription" : "",
|
42 |
+
"shape" : "[1, 32, 128, 64]",
|
43 |
+
"name" : "new_k_cache_2",
|
44 |
+
"type" : "MultiArray"
|
45 |
+
},
|
46 |
+
{
|
47 |
+
"hasShapeFlexibility" : "0",
|
48 |
+
"isOptional" : "0",
|
49 |
+
"dataType" : "Float16",
|
50 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
51 |
+
"shortDescription" : "",
|
52 |
+
"shape" : "[1, 32, 128, 64]",
|
53 |
+
"name" : "new_v_cache_0",
|
54 |
+
"type" : "MultiArray"
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"hasShapeFlexibility" : "0",
|
58 |
+
"isOptional" : "0",
|
59 |
+
"dataType" : "Float16",
|
60 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
61 |
+
"shortDescription" : "",
|
62 |
+
"shape" : "[1, 32, 128, 64]",
|
63 |
+
"name" : "new_v_cache_1",
|
64 |
+
"type" : "MultiArray"
|
65 |
+
},
|
66 |
+
{
|
67 |
+
"hasShapeFlexibility" : "0",
|
68 |
+
"isOptional" : "0",
|
69 |
+
"dataType" : "Float16",
|
70 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
71 |
+
"shortDescription" : "",
|
72 |
+
"shape" : "[1, 32, 128, 64]",
|
73 |
+
"name" : "new_v_cache_2",
|
74 |
+
"type" : "MultiArray"
|
75 |
+
}
|
76 |
+
],
|
77 |
+
"modelParameters" : [
|
78 |
+
|
79 |
+
],
|
80 |
+
"specificationVersion" : 7,
|
81 |
+
"mlProgramOperationTypeHistogram" : {
|
82 |
+
"Concat" : 18,
|
83 |
+
"Ios16.rsqrt" : 6,
|
84 |
+
"Ios16.mul" : 63,
|
85 |
+
"SliceByIndex" : 12,
|
86 |
+
"Ios16.constexprLutToDense" : 21,
|
87 |
+
"Ios16.conv" : 21,
|
88 |
+
"Ios16.add" : 21,
|
89 |
+
"Ios16.reduceMean" : 6,
|
90 |
+
"Ios16.matmul" : 6,
|
91 |
+
"Ios16.softmax" : 3,
|
92 |
+
"Ios16.reshape" : 12,
|
93 |
+
"Ios16.silu" : 3
|
94 |
+
},
|
95 |
+
"computePrecision" : "Mixed (Float16, Int32)",
|
96 |
+
"isUpdatable" : "0",
|
97 |
+
"availability" : {
|
98 |
+
"macOS" : "13.0",
|
99 |
+
"tvOS" : "16.0",
|
100 |
+
"visionOS" : "1.0",
|
101 |
+
"watchOS" : "9.0",
|
102 |
+
"iOS" : "16.0",
|
103 |
+
"macCatalyst" : "16.0"
|
104 |
+
},
|
105 |
+
"modelType" : {
|
106 |
+
"name" : "MLModelType_mlProgram"
|
107 |
+
},
|
108 |
+
"userDefinedMetadata" : {
|
109 |
+
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
110 |
+
"com.github.apple.coremltools.source" : "torch==2.1.0",
|
111 |
+
"com.github.apple.coremltools.version" : "7.2"
|
112 |
+
},
|
113 |
+
"inputSchema" : [
|
114 |
+
{
|
115 |
+
"hasShapeFlexibility" : "0",
|
116 |
+
"isOptional" : "0",
|
117 |
+
"dataType" : "Float16",
|
118 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
119 |
+
"shortDescription" : "",
|
120 |
+
"shape" : "[1, 4096, 1, 64]",
|
121 |
+
"name" : "x",
|
122 |
+
"type" : "MultiArray"
|
123 |
+
},
|
124 |
+
{
|
125 |
+
"hasShapeFlexibility" : "0",
|
126 |
+
"isOptional" : "0",
|
127 |
+
"dataType" : "Float16",
|
128 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
129 |
+
"shortDescription" : "",
|
130 |
+
"shape" : "[128, 64]",
|
131 |
+
"name" : "cos",
|
132 |
+
"type" : "MultiArray"
|
133 |
+
},
|
134 |
+
{
|
135 |
+
"hasShapeFlexibility" : "0",
|
136 |
+
"isOptional" : "0",
|
137 |
+
"dataType" : "Float16",
|
138 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
139 |
+
"shortDescription" : "",
|
140 |
+
"shape" : "[128, 64]",
|
141 |
+
"name" : "sin",
|
142 |
+
"type" : "MultiArray"
|
143 |
+
},
|
144 |
+
{
|
145 |
+
"hasShapeFlexibility" : "0",
|
146 |
+
"isOptional" : "0",
|
147 |
+
"dataType" : "Float16",
|
148 |
+
"formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
|
149 |
+
"shortDescription" : "",
|
150 |
+
"shape" : "[1, 1, 64, 512]",
|
151 |
+
"name" : "mask",
|
152 |
+
"type" : "MultiArray"
|
153 |
+
},
|
154 |
+
{
|
155 |
+
"hasShapeFlexibility" : "0",
|
156 |
+
"isOptional" : "1",
|
157 |
+
"dataType" : "Float16",
|
158 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
159 |
+
"shortDescription" : "",
|
160 |
+
"shape" : "[1, 32, 128, 448]",
|
161 |
+
"name" : "k_cache_0",
|
162 |
+
"type" : "MultiArray"
|
163 |
+
},
|
164 |
+
{
|
165 |
+
"hasShapeFlexibility" : "0",
|
166 |
+
"isOptional" : "1",
|
167 |
+
"dataType" : "Float16",
|
168 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
169 |
+
"shortDescription" : "",
|
170 |
+
"shape" : "[1, 32, 128, 448]",
|
171 |
+
"name" : "v_cache_0",
|
172 |
+
"type" : "MultiArray"
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"hasShapeFlexibility" : "0",
|
176 |
+
"isOptional" : "1",
|
177 |
+
"dataType" : "Float16",
|
178 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
179 |
+
"shortDescription" : "",
|
180 |
+
"shape" : "[1, 32, 128, 448]",
|
181 |
+
"name" : "k_cache_1",
|
182 |
+
"type" : "MultiArray"
|
183 |
+
},
|
184 |
+
{
|
185 |
+
"hasShapeFlexibility" : "0",
|
186 |
+
"isOptional" : "1",
|
187 |
+
"dataType" : "Float16",
|
188 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
189 |
+
"shortDescription" : "",
|
190 |
+
"shape" : "[1, 32, 128, 448]",
|
191 |
+
"name" : "v_cache_1",
|
192 |
+
"type" : "MultiArray"
|
193 |
+
},
|
194 |
+
{
|
195 |
+
"hasShapeFlexibility" : "0",
|
196 |
+
"isOptional" : "1",
|
197 |
+
"dataType" : "Float16",
|
198 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
199 |
+
"shortDescription" : "",
|
200 |
+
"shape" : "[1, 32, 128, 448]",
|
201 |
+
"name" : "k_cache_2",
|
202 |
+
"type" : "MultiArray"
|
203 |
+
},
|
204 |
+
{
|
205 |
+
"hasShapeFlexibility" : "0",
|
206 |
+
"isOptional" : "1",
|
207 |
+
"dataType" : "Float16",
|
208 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
209 |
+
"shortDescription" : "",
|
210 |
+
"shape" : "[1, 32, 128, 448]",
|
211 |
+
"name" : "v_cache_2",
|
212 |
+
"type" : "MultiArray"
|
213 |
+
}
|
214 |
+
],
|
215 |
+
"generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk2",
|
216 |
+
"method" : "predict"
|
217 |
+
}
|
218 |
+
]
|
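
Per the schema above, each transformer chunk takes "x", "cos", "sin", "mask", and optional per-block caches k_cache_i/v_cache_i of shape [1, 32, 128, 448], and returns "new_x" plus new_k_cache_i/new_v_cache_i of shape [1, 32, 128, 64]. A hedged sketch of how a host loop might thread those caches between calls follows; the sliding-window update and loading API are assumptions for illustration, not the pipeline shipped with this repo:

# Sketch: thread KV-cache outputs of one chunk back into its cache inputs.
import numpy as np
import coremltools as ct

CTX, STEP, BLOCKS = 448, 64, 3   # cache length, tokens per call, blocks in this chunk
chunk = ct.models.CompiledMLModel("Llama-2-7b-hf_chunk2.mlmodelc")  # assumed local path

def run_chunk(x, cos, sin, mask, caches):
    out = chunk.predict({"x": x, "cos": cos, "sin": sin, "mask": mask, **caches})
    new_caches = {}
    for i in range(BLOCKS):
        for kind in ("k", "v"):
            old = caches[f"{kind}_cache_{i}"]         # [1, 32, 128, 448]
            new = out[f"new_{kind}_cache_{i}"]        # [1, 32, 128, 64]
            # Assumed sliding window: append the 64 new positions, keep the last 448.
            new_caches[f"{kind}_cache_{i}"] = np.concatenate([old, new], axis=-1)[..., -CTX:]
    return out["new_x"], new_caches

caches = {f"{k}_cache_{i}": np.zeros((1, 32, 128, CTX), np.float16)
          for i in range(BLOCKS) for k in ("k", "v")}
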
Llama-2-7b-hf_chunk2.mlmodelc/model.mil
ADDED
@@ -0,0 +1,429 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [128, 64]> cos, tensor<fp16, [1, 32, 128, 448]> k_cache_0, tensor<fp16, [1, 32, 128, 448]> k_cache_1, tensor<fp16, [1, 32, 128, 448]> k_cache_2, tensor<fp16, [1, 1, 64, 512]> mask, tensor<fp16, [128, 64]> sin, tensor<fp16, [1, 32, 128, 448]> v_cache_0, tensor<fp16, [1, 32, 128, 448]> v_cache_1, tensor<fp16, [1, 32, 128, 448]> v_cache_2, tensor<fp16, [1, 4096, 1, 64]> x) [CoreML_InputDefaultValues = dict<tensor<string, []>, tensor<fp32, []>>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"k_cache_2", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}, {"v_cache_2", 0}})] {
|
5 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388736))), name = tensor<string, []>("blocks_0_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
6 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388864))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777536))), name = tensor<string, []>("blocks_0_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
7 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777664))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166336))), name = tensor<string, []>("blocks_0_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
8 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166464))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555136))), name = tensor<string, []>("blocks_0_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
9 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555264))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099712))), name = tensor<string, []>("blocks_0_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
10 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644288))), name = tensor<string, []>("blocks_0_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
11 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644416))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188864))), name = tensor<string, []>("blocks_0_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
12 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188992))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577664))), name = tensor<string, []>("blocks_1_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577792))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966464))), name = tensor<string, []>("blocks_1_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966592))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355264))), name = tensor<string, []>("blocks_1_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355392))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744064))), name = tensor<string, []>("blocks_1_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744192))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288640))), name = tensor<string, []>("blocks_1_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288768))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833216))), name = tensor<string, []>("blocks_1_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833344))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377792))), name = tensor<string, []>("blocks_1_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377920))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766592))), name = tensor<string, []>("blocks_2_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766720))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155392))), name = tensor<string, []>("blocks_2_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155520))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544192))), name = tensor<string, []>("blocks_2_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544320))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235932992))), name = tensor<string, []>("blocks_2_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235933120))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477568))), name = tensor<string, []>("blocks_2_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477696))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022144))), name = tensor<string, []>("blocks_2_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
tensor<fp16, [4096, 11008, 1, 1]> blocks_2_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022272))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566720))), name = tensor<string, []>("blocks_2_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
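The constexpr_lut_to_dense constants above rebuild each projection weight from 4-bit palettized storage: a [4096, 4096, 1, 1] weight has 16,777,216 values packed two codes per byte into the 8,388,608 index bytes, plus a 16-entry fp16 look-up table read from weight.bin. A minimal NumPy sketch of the dequantization, assuming low-nibble-first packing (the helper name, packing order, and toy data are illustrative, not taken from the Core ML runtime):

import numpy as np

def lut_to_dense(packed: np.ndarray, lut: np.ndarray, shape) -> np.ndarray:
    # packed: uint8 buffer with two 4-bit codes per byte; lut: 16 fp16 centroids.
    lo = packed & 0x0F                     # assumed: first code in the low nibble
    hi = packed >> 4                       # assumed: second code in the high nibble
    codes = np.empty(packed.size * 2, dtype=np.uint8)
    codes[0::2], codes[1::2] = lo, hi
    return lut[codes].reshape(shape)       # dense fp16 weight, e.g. [4096, 4096, 1, 1]

# Toy usage with random data standing in for the weight.bin blobs referenced above.
lut = np.linspace(-1.0, 1.0, 16, dtype=np.float16)
packed = np.random.randint(0, 256, size=4096 * 4096 // 2, dtype=np.uint8)
w_q = lut_to_dense(packed, lut, (4096, 4096, 1, 1))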
tensor<int32, []> var_18 = const()[name = tensor<string, []>("op_18"), val = tensor<int32, []>(3)];
tensor<int32, []> var_23 = const()[name = tensor<string, []>("op_23"), val = tensor<int32, []>(-2)];
tensor<int32, []> var_25 = const()[name = tensor<string, []>("op_25"), val = tensor<int32, []>(-1)];
tensor<int32, []> var_32 = const()[name = tensor<string, []>("op_32"), val = tensor<int32, []>(1)];
tensor<bool, []> var_33 = const()[name = tensor<string, []>("op_33"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 4096, 1, 64]> var_41_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_41_cast_fp16")];
tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_1_cast_fp16 = reduce_mean(axes = var_42, keep_dims = var_33, x = var_41_cast_fp16)[name = tensor<string, []>("norm_x_1_cast_fp16")];
tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_45_cast_fp16 = add(x = norm_x_1_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
tensor<fp16, []> var_46_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_46_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_46_cast_fp16 = rsqrt(epsilon = var_46_epsilon_0_to_fp16, x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_46_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566848)))];
tensor<fp16, [1, 4096, 1, 64]> x_5_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
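The sequence from op_41 through x_5 is an RMSNorm over the 4096-channel axis of the [1, 4096, 1, 64] activation: square, mean over axis 1, add a small constant (0x1.5p-17 ≈ 1e-5), take the reciprocal square root, and scale by the learned per-channel weight. A rough NumPy equivalent, assuming this reading of the ops:

import numpy as np

def rms_norm(x: np.ndarray, weight: np.ndarray, eps: float = 1e-5) -> np.ndarray:
    # x: [1, 4096, 1, 64]; weight: [1, 4096, 1, 1] (e.g. blocks_0_norm_1_weight).
    norm_x = np.mean(x * x, axis=1, keepdims=True)   # op_41, norm_x_1
    inv = 1.0 / np.sqrt(norm_x + eps)                # op_45 (add eps), op_46 (rsqrt)
    return x * inv * weight                          # x_normed_1, x_5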
tensor<int32, [2]> var_58 = const()[name = tensor<string, []>("op_58"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_60 = const()[name = tensor<string, []>("op_60"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_62_pad_type_0 = const()[name = tensor<string, []>("op_62_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_62_pad_0 = const()[name = tensor<string, []>("op_62_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_62_cast_fp16 = conv(dilations = var_60, groups = var_32, pad = var_62_pad_0, pad_type = var_62_pad_type_0, strides = var_58, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303575104)))];
tensor<fp16, [1, 4096, 1, 64]> q_1_cast_fp16 = mul(x = var_62_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_1_cast_fp16")];
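Each linear projection is expressed as a 1x1 convolution over the [1, channels, 1, tokens] layout (a layout commonly used for Neural Engine execution), and its output is multiplied by a per-output-channel scale constant that compensates for the palettized weight. A sketch of the same computation, with illustrative names; the k and v projections below repeat the identical pattern with their own weights and scales:

import numpy as np

def project(x: np.ndarray, w: np.ndarray, out_scales: np.ndarray) -> np.ndarray:
    # x: [1, C_in, 1, T]; w: [C_out, C_in, 1, 1]; out_scales: [1, C_out, 1, 1].
    y = np.einsum('bcht,ocij->boht', x, w)   # 1x1 conv, stride 1, no padding
    return y * out_scales                    # e.g. blocks_0_attn_q_proj_output_scales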
tensor<int32, [2]> var_66 = const()[name = tensor<string, []>("op_66"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_68 = const()[name = tensor<string, []>("op_68"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_70_pad_type_0 = const()[name = tensor<string, []>("op_70_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_70_pad_0 = const()[name = tensor<string, []>("op_70_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_70_cast_fp16 = conv(dilations = var_68, groups = var_32, pad = var_70_pad_0, pad_type = var_70_pad_type_0, strides = var_66, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_70_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303583360)))];
tensor<fp16, [1, 4096, 1, 64]> k_1_cast_fp16 = mul(x = var_70_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_1_cast_fp16")];
tensor<int32, [2]> var_74 = const()[name = tensor<string, []>("op_74"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_76 = const()[name = tensor<string, []>("op_76"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_78_pad_type_0 = const()[name = tensor<string, []>("op_78_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_78_pad_0 = const()[name = tensor<string, []>("op_78_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_78_cast_fp16 = conv(dilations = var_76, groups = var_32, pad = var_78_pad_0, pad_type = var_78_pad_type_0, strides = var_74, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_78_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303591616)))];
tensor<fp16, [1, 4096, 1, 64]> v_1_cast_fp16 = mul(x = var_78_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_1_cast_fp16")];
tensor<int32, [4]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> q_3_cast_fp16 = reshape(shape = var_80, x = q_1_cast_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
tensor<int32, [4]> var_82 = const()[name = tensor<string, []>("op_82"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> k_3_cast_fp16 = reshape(shape = var_82, x = k_1_cast_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
tensor<int32, [4]> var_84 = const()[name = tensor<string, []>("op_84"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> new_v_cache_0 = reshape(shape = var_84, x = v_1_cast_fp16)[name = tensor<string, []>("v_3_cast_fp16")];
tensor<int32, [4]> var_96_begin_0 = const()[name = tensor<string, []>("op_96_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_96_end_0 = const()[name = tensor<string, []>("op_96_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_96_end_mask_0 = const()[name = tensor<string, []>("op_96_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_96_cast_fp16 = slice_by_index(begin = var_96_begin_0, end = var_96_end_0, end_mask = var_96_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_96_cast_fp16")];
tensor<int32, [4]> var_102_begin_0 = const()[name = tensor<string, []>("op_102_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_102_end_0 = const()[name = tensor<string, []>("op_102_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_102_end_mask_0 = const()[name = tensor<string, []>("op_102_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_102_cast_fp16 = slice_by_index(begin = var_102_begin_0, end = var_102_end_0, end_mask = var_102_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_102_cast_fp16")];
tensor<fp16, []> const_3_promoted_to_fp16 = const()[name = tensor<string, []>("const_3_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_104_cast_fp16 = mul(x = var_102_cast_fp16, y = const_3_promoted_to_fp16)[name = tensor<string, []>("op_104_cast_fp16")];
tensor<bool, []> rotated_1_interleave_0 = const()[name = tensor<string, []>("rotated_1_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_1_cast_fp16 = concat(axis = var_23, interleave = rotated_1_interleave_0, values = (var_104_cast_fp16, var_96_cast_fp16))[name = tensor<string, []>("rotated_1_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_107_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = tensor<string, []>("op_107_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_108_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = tensor<string, []>("op_108_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_1_cast_fp16 = add(x = var_107_cast_fp16, y = var_108_cast_fp16)[name = tensor<string, []>("roped_1_cast_fp16")];
tensor<int32, [4]> var_121_begin_0 = const()[name = tensor<string, []>("op_121_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_121_end_0 = const()[name = tensor<string, []>("op_121_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_121_end_mask_0 = const()[name = tensor<string, []>("op_121_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_121_cast_fp16 = slice_by_index(begin = var_121_begin_0, end = var_121_end_0, end_mask = var_121_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")];
tensor<int32, [4]> var_127_begin_0 = const()[name = tensor<string, []>("op_127_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_127_end_0 = const()[name = tensor<string, []>("op_127_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_127_end_mask_0 = const()[name = tensor<string, []>("op_127_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_127_cast_fp16 = slice_by_index(begin = var_127_begin_0, end = var_127_end_0, end_mask = var_127_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_127_cast_fp16")];
tensor<fp16, []> const_5_promoted_to_fp16 = const()[name = tensor<string, []>("const_5_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_129_cast_fp16 = mul(x = var_127_cast_fp16, y = const_5_promoted_to_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
tensor<bool, []> rotated_3_interleave_0 = const()[name = tensor<string, []>("rotated_3_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_3_cast_fp16 = concat(axis = var_23, interleave = rotated_3_interleave_0, values = (var_129_cast_fp16, var_121_cast_fp16))[name = tensor<string, []>("rotated_3_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_132_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = tensor<string, []>("op_132_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_133_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = tensor<string, []>("op_133_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_3_cast_fp16 = add(x = var_132_cast_fp16, y = var_133_cast_fp16)[name = tensor<string, []>("roped_3_cast_fp16")];
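Ops 96 through 133 apply rotary position embeddings to the reshaped queries and keys ([1, 32 heads, 128 head-dim, 64 tokens]): the head dimension is split at 64, the upper half is negated and concatenated in front of the lower half, and the result is combined as x*cos + rotate_half(x)*sin using the model's cos and sin inputs. A compact sketch under that reading:

import numpy as np

def apply_rope(x: np.ndarray, cos: np.ndarray, sin: np.ndarray) -> np.ndarray:
    # x: [1, 32, 128, 64]; cos/sin broadcastable to the same shape.
    lower, upper = x[:, :, :64, :], x[:, :, 64:, :]     # op_96 / op_102 style slices
    rotated = np.concatenate([-upper, lower], axis=2)   # mul by -1, concat on axis -2
    return x * cos + rotated * sin                      # op_107, op_108, roped_*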
tensor<bool, []> q_5_interleave_0 = const()[name = tensor<string, []>("q_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> q_5_cast_fp16 = concat(axis = var_23, interleave = q_5_interleave_0, values = roped_1_cast_fp16)[name = tensor<string, []>("q_5_cast_fp16")];
tensor<bool, []> k_5_interleave_0 = const()[name = tensor<string, []>("k_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> new_k_cache_0 = concat(axis = var_23, interleave = k_5_interleave_0, values = roped_3_cast_fp16)[name = tensor<string, []>("k_5_cast_fp16")];
tensor<bool, []> k_7_interleave_0 = const()[name = tensor<string, []>("k_7_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_25, interleave = k_7_interleave_0, values = (k_cache_0, new_k_cache_0))[name = tensor<string, []>("k_7_cast_fp16")];
tensor<bool, []> v_5_interleave_0 = const()[name = tensor<string, []>("v_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> v_5_cast_fp16 = concat(axis = var_25, interleave = v_5_interleave_0, values = (v_cache_0, new_v_cache_0))[name = tensor<string, []>("v_5_cast_fp16")];
tensor<fp16, []> var_155_to_fp16 = const()[name = tensor<string, []>("op_155_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 64]> var_156_cast_fp16 = mul(x = q_5_cast_fp16, y = var_155_to_fp16)[name = tensor<string, []>("op_156_cast_fp16")];
tensor<bool, []> attn_weights_1_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_0"), val = tensor<bool, []>(true)];
tensor<bool, []> attn_weights_1_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 64, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_156_cast_fp16, y = k_7_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> var_164_cast_fp16 = softmax(axis = var_18, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 32, 128, 64]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = v_5_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
tensor<int32, [4]> var_168 = const()[name = tensor<string, []>("op_168"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 4096, 1, 64]> input_1_cast_fp16 = reshape(shape = var_168, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
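The block from k_7 through input_1 is scaled dot-product attention against the concatenated KV cache: the 64 new key/value positions are appended to a 448-position cache to give 512 keys, queries are scaled by 0x1.6ap-4 ≈ 1/sqrt(128), the attention mask is added before the softmax over the key axis, and the weighted values are reshaped back to [1, 4096, 1, 64]. A NumPy sketch assuming the mask broadcasts to [1, 32, 64, 512]:

import numpy as np

def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def cached_attention(q, new_k, new_v, k_cache, v_cache, mask):
    # q, new_k, new_v: [1, 32, 128, 64]; k_cache, v_cache: [1, 32, 128, 448].
    k = np.concatenate([k_cache, new_k], axis=-1)           # k_7: [1, 32, 128, 512]
    v = np.concatenate([v_cache, new_v], axis=-1)           # v_5: [1, 32, 128, 512]
    scale = 1.0 / np.sqrt(128.0)                            # op_155 ≈ 0x1.6ap-4
    scores = np.einsum('bhdq,bhdk->bhqk', q * scale, k)     # matmul, transpose_x=True
    weights = softmax(scores + mask, axis=-1)               # attn_weights_3, op_164
    attn = np.einsum('bhdk,bhqk->bhdq', v, weights)         # matmul, transpose_y=True
    return attn.reshape(1, 4096, 1, -1)                     # input_1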
tensor<int32, [2]> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_174 = const()[name = tensor<string, []>("op_174"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_176_pad_type_0 = const()[name = tensor<string, []>("op_176_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_176_pad_0 = const()[name = tensor<string, []>("op_176_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_176_cast_fp16 = conv(dilations = var_174, groups = var_32, pad = var_176_pad_0, pad_type = var_176_pad_type_0, strides = var_172, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303599872)))];
tensor<fp16, [1, 4096, 1, 64]> attention_output_1_cast_fp16 = mul(x = var_176_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = tensor<string, []>("x_11_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> var_185_cast_fp16 = mul(x = x_11_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("op_185_cast_fp16")];
tensor<int32, [1]> var_186 = const()[name = tensor<string, []>("op_186"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_3_cast_fp16 = reduce_mean(axes = var_186, keep_dims = var_33, x = var_185_cast_fp16)[name = tensor<string, []>("norm_x_3_cast_fp16")];
tensor<fp16, []> var_188_to_fp16 = const()[name = tensor<string, []>("op_188_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_189_cast_fp16 = add(x = norm_x_3_cast_fp16, y = var_188_to_fp16)[name = tensor<string, []>("op_189_cast_fp16")];
tensor<fp16, []> var_190_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_190_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_190_cast_fp16 = rsqrt(epsilon = var_190_epsilon_0_to_fp16, x = var_189_cast_fp16)[name = tensor<string, []>("op_190_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_5_cast_fp16 = mul(x = x_11_cast_fp16, y = var_190_cast_fp16)[name = tensor<string, []>("x_normed_5_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303608128)))];
tensor<fp16, [1, 4096, 1, 64]> input_3_cast_fp16 = mul(x = x_normed_5_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
tensor<int32, [2]> var_202 = const()[name = tensor<string, []>("op_202"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_206_pad_type_0 = const()[name = tensor<string, []>("op_206_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_206_pad_0 = const()[name = tensor<string, []>("op_206_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 64]> var_206_cast_fp16 = conv(dilations = var_204, groups = var_32, pad = var_206_pad_0, pad_type = var_206_pad_type_0, strides = var_202, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_206_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303616384)))];
tensor<fp16, [1, 11008, 1, 64]> input_5_cast_fp16 = mul(x = var_206_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
tensor<int32, [2]> var_210 = const()[name = tensor<string, []>("op_210"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_214_pad_type_0 = const()[name = tensor<string, []>("op_214_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_214_pad_0 = const()[name = tensor<string, []>("op_214_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 64]> var_214_cast_fp16 = conv(dilations = var_212, groups = var_32, pad = var_214_pad_0, pad_type = var_214_pad_type_0, strides = var_210, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303638464)))];
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_1_cast_fp16 = mul(x = var_214_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_1_cast_fp16")];
tensor<fp16, [1, 11008, 1, 64]> var_216_cast_fp16 = silu(x = input_5_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
tensor<fp16, [1, 11008, 1, 64]> input_7_cast_fp16 = mul(x = var_216_cast_fp16, y = x_fc_2_1_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
tensor<int32, [2]> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_224_pad_type_0 = const()[name = tensor<string, []>("op_224_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_224_pad_0 = const()[name = tensor<string, []>("op_224_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_224_cast_fp16 = conv(dilations = var_222, groups = var_32, pad = var_224_pad_0, pad_type = var_224_pad_type_0, strides = var_220, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("op_224_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303660544)))];
tensor<fp16, [1, 4096, 1, 64]> var_225_cast_fp16 = mul(x = var_224_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_225_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_15_cast_fp16 = add(x = var_225_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("x_15_cast_fp16")];
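Ops 202 through 225 form the SwiGLU feed-forward block: two 4096-to-11008 projections (fc_1 and fc_2), a SiLU gate applied to fc_1, an elementwise product, and an 11008-to-4096 projection, each followed by its per-channel output scale; the result is added back onto the residual stream as x_15. A sketch using the same 1x1-conv-as-linear convention as above (argument names are illustrative):

import numpy as np

def silu(x: np.ndarray) -> np.ndarray:
    return x / (1.0 + np.exp(-x))                                  # op_216

def swiglu_mlp(x_normed, residual, w_fc1, s_fc1, w_fc2, s_fc2, w_proj, s_proj):
    # x_normed: [1, 4096, 1, 64] (input_3); residual: [1, 4096, 1, 64] (x_11).
    gate = np.einsum('bcht,ocij->boht', x_normed, w_fc1) * s_fc1   # input_5
    up = np.einsum('bcht,ocij->boht', x_normed, w_fc2) * s_fc2     # x_fc_2_1
    hidden = silu(gate) * up                                       # input_7
    out = np.einsum('bcht,ocij->boht', hidden, w_proj) * s_proj    # op_225
    return out + residual                                          # x_15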
tensor<int32, []> var_232 = const()[name = tensor<string, []>("op_232"), val = tensor<int32, []>(3)];
tensor<int32, []> var_237 = const()[name = tensor<string, []>("op_237"), val = tensor<int32, []>(-2)];
tensor<int32, []> var_239 = const()[name = tensor<string, []>("op_239"), val = tensor<int32, []>(-1)];
tensor<int32, []> var_246 = const()[name = tensor<string, []>("op_246"), val = tensor<int32, []>(1)];
tensor<bool, []> var_247 = const()[name = tensor<string, []>("op_247"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 4096, 1, 64]> var_254_cast_fp16 = mul(x = x_15_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("op_254_cast_fp16")];
tensor<int32, [1]> var_255 = const()[name = tensor<string, []>("op_255"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_5_cast_fp16 = reduce_mean(axes = var_255, keep_dims = var_247, x = var_254_cast_fp16)[name = tensor<string, []>("norm_x_5_cast_fp16")];
tensor<fp16, []> var_257_to_fp16 = const()[name = tensor<string, []>("op_257_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_258_cast_fp16 = add(x = norm_x_5_cast_fp16, y = var_257_to_fp16)[name = tensor<string, []>("op_258_cast_fp16")];
tensor<fp16, []> var_259_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_259_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_259_cast_fp16 = rsqrt(epsilon = var_259_epsilon_0_to_fp16, x = var_258_cast_fp16)[name = tensor<string, []>("op_259_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_9_cast_fp16 = mul(x = x_15_cast_fp16, y = var_259_cast_fp16)[name = tensor<string, []>("x_normed_9_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303668800)))];
tensor<fp16, [1, 4096, 1, 64]> x_19_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
tensor<int32, [2]> var_274 = const()[name = tensor<string, []>("op_274"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_276 = const()[name = tensor<string, []>("op_276"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_278_pad_type_0 = const()[name = tensor<string, []>("op_278_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_278_pad_0 = const()[name = tensor<string, []>("op_278_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_278_cast_fp16 = conv(dilations = var_276, groups = var_246, pad = var_278_pad_0, pad_type = var_278_pad_type_0, strides = var_274, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_278_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303677056)))];
tensor<fp16, [1, 4096, 1, 64]> q_7_cast_fp16 = mul(x = var_278_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
tensor<int32, [2]> var_282 = const()[name = tensor<string, []>("op_282"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_286_pad_type_0 = const()[name = tensor<string, []>("op_286_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_286_pad_0 = const()[name = tensor<string, []>("op_286_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_286_cast_fp16 = conv(dilations = var_284, groups = var_246, pad = var_286_pad_0, pad_type = var_286_pad_type_0, strides = var_282, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_286_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303685312)))];
tensor<fp16, [1, 4096, 1, 64]> k_9_cast_fp16 = mul(x = var_286_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_9_cast_fp16")];
tensor<int32, [2]> var_290 = const()[name = tensor<string, []>("op_290"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_294_pad_type_0 = const()[name = tensor<string, []>("op_294_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_294_pad_0 = const()[name = tensor<string, []>("op_294_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_294_cast_fp16 = conv(dilations = var_292, groups = var_246, pad = var_294_pad_0, pad_type = var_294_pad_type_0, strides = var_290, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_294_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303693568)))];
tensor<fp16, [1, 4096, 1, 64]> v_7_cast_fp16 = mul(x = var_294_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_7_cast_fp16")];
tensor<int32, [4]> var_296 = const()[name = tensor<string, []>("op_296"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> q_9_cast_fp16 = reshape(shape = var_296, x = q_7_cast_fp16)[name = tensor<string, []>("q_9_cast_fp16")];
tensor<int32, [4]> var_298 = const()[name = tensor<string, []>("op_298"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> k_11_cast_fp16 = reshape(shape = var_298, x = k_9_cast_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
tensor<int32, [4]> var_300 = const()[name = tensor<string, []>("op_300"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> new_v_cache_1 = reshape(shape = var_300, x = v_7_cast_fp16)[name = tensor<string, []>("v_9_cast_fp16")];
tensor<int32, [4]> var_312_begin_0 = const()[name = tensor<string, []>("op_312_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_312_end_0 = const()[name = tensor<string, []>("op_312_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_312_end_mask_0 = const()[name = tensor<string, []>("op_312_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_312_cast_fp16 = slice_by_index(begin = var_312_begin_0, end = var_312_end_0, end_mask = var_312_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_312_cast_fp16")];
tensor<int32, [4]> var_318_begin_0 = const()[name = tensor<string, []>("op_318_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_318_end_0 = const()[name = tensor<string, []>("op_318_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_318_end_mask_0 = const()[name = tensor<string, []>("op_318_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_318_cast_fp16 = slice_by_index(begin = var_318_begin_0, end = var_318_end_0, end_mask = var_318_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_318_cast_fp16")];
tensor<fp16, []> const_10_promoted_to_fp16 = const()[name = tensor<string, []>("const_10_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_320_cast_fp16 = mul(x = var_318_cast_fp16, y = const_10_promoted_to_fp16)[name = tensor<string, []>("op_320_cast_fp16")];
tensor<bool, []> rotated_5_interleave_0 = const()[name = tensor<string, []>("rotated_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_5_cast_fp16 = concat(axis = var_237, interleave = rotated_5_interleave_0, values = (var_320_cast_fp16, var_312_cast_fp16))[name = tensor<string, []>("rotated_5_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_323_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = tensor<string, []>("op_323_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_324_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = tensor<string, []>("op_324_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_5_cast_fp16 = add(x = var_323_cast_fp16, y = var_324_cast_fp16)[name = tensor<string, []>("roped_5_cast_fp16")];
tensor<int32, [4]> var_337_begin_0 = const()[name = tensor<string, []>("op_337_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_337_end_0 = const()[name = tensor<string, []>("op_337_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_337_end_mask_0 = const()[name = tensor<string, []>("op_337_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_337_cast_fp16 = slice_by_index(begin = var_337_begin_0, end = var_337_end_0, end_mask = var_337_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_337_cast_fp16")];
tensor<int32, [4]> var_343_begin_0 = const()[name = tensor<string, []>("op_343_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_343_end_0 = const()[name = tensor<string, []>("op_343_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_343_end_mask_0 = const()[name = tensor<string, []>("op_343_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_343_cast_fp16 = slice_by_index(begin = var_343_begin_0, end = var_343_end_0, end_mask = var_343_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_343_cast_fp16")];
tensor<fp16, []> const_12_promoted_to_fp16 = const()[name = tensor<string, []>("const_12_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_345_cast_fp16 = mul(x = var_343_cast_fp16, y = const_12_promoted_to_fp16)[name = tensor<string, []>("op_345_cast_fp16")];
tensor<bool, []> rotated_7_interleave_0 = const()[name = tensor<string, []>("rotated_7_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_7_cast_fp16 = concat(axis = var_237, interleave = rotated_7_interleave_0, values = (var_345_cast_fp16, var_337_cast_fp16))[name = tensor<string, []>("rotated_7_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_348_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = tensor<string, []>("op_348_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_349_cast_fp16 = mul(x = rotated_7_cast_fp16, y = sin)[name = tensor<string, []>("op_349_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_7_cast_fp16 = add(x = var_348_cast_fp16, y = var_349_cast_fp16)[name = tensor<string, []>("roped_7_cast_fp16")];
tensor<bool, []> q_11_interleave_0 = const()[name = tensor<string, []>("q_11_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> q_11_cast_fp16 = concat(axis = var_237, interleave = q_11_interleave_0, values = roped_5_cast_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
tensor<bool, []> k_13_interleave_0 = const()[name = tensor<string, []>("k_13_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> new_k_cache_1 = concat(axis = var_237, interleave = k_13_interleave_0, values = roped_7_cast_fp16)[name = tensor<string, []>("k_13_cast_fp16")];
tensor<bool, []> k_15_interleave_0 = const()[name = tensor<string, []>("k_15_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> k_15_cast_fp16 = concat(axis = var_239, interleave = k_15_interleave_0, values = (k_cache_1, new_k_cache_1))[name = tensor<string, []>("k_15_cast_fp16")];
tensor<bool, []> v_11_interleave_0 = const()[name = tensor<string, []>("v_11_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> v_11_cast_fp16 = concat(axis = var_239, interleave = v_11_interleave_0, values = (v_cache_1, new_v_cache_1))[name = tensor<string, []>("v_11_cast_fp16")];
tensor<fp16, []> var_371_to_fp16 = const()[name = tensor<string, []>("op_371_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 64]> var_372_cast_fp16 = mul(x = q_11_cast_fp16, y = var_371_to_fp16)[name = tensor<string, []>("op_372_cast_fp16")];
tensor<bool, []> attn_weights_5_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_x_0"), val = tensor<bool, []>(true)];
tensor<bool, []> attn_weights_5_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_y_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 64, 512]> attn_weights_5_cast_fp16 = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_372_cast_fp16, y = k_15_cast_fp16)[name = tensor<string, []>("attn_weights_5_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> attn_weights_7_cast_fp16 = add(x = attn_weights_5_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_7_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> var_380_cast_fp16 = softmax(axis = var_232, x = attn_weights_7_cast_fp16)[name = tensor<string, []>("op_380_cast_fp16")];
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 32, 128, 64]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = v_11_cast_fp16, y = var_380_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
tensor<int32, [4]> var_384 = const()[name = tensor<string, []>("op_384"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 4096, 1, 64]> input_9_cast_fp16 = reshape(shape = var_384, x = attn_3_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
tensor<int32, [2]> var_388 = const()[name = tensor<string, []>("op_388"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_390 = const()[name = tensor<string, []>("op_390"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_392_pad_type_0 = const()[name = tensor<string, []>("op_392_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_392_pad_0 = const()[name = tensor<string, []>("op_392_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_392_cast_fp16 = conv(dilations = var_390, groups = var_246, pad = var_392_pad_0, pad_type = var_392_pad_type_0, strides = var_388, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("op_392_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303701824)))];
tensor<fp16, [1, 4096, 1, 64]> attention_output_3_cast_fp16 = mul(x = var_392_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_3_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_25_cast_fp16 = add(x = attention_output_3_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> var_401_cast_fp16 = mul(x = x_25_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_401_cast_fp16")];
tensor<int32, [1]> var_402 = const()[name = tensor<string, []>("op_402"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_7_cast_fp16 = reduce_mean(axes = var_402, keep_dims = var_247, x = var_401_cast_fp16)[name = tensor<string, []>("norm_x_7_cast_fp16")];
tensor<fp16, []> var_404_to_fp16 = const()[name = tensor<string, []>("op_404_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_405_cast_fp16 = add(x = norm_x_7_cast_fp16, y = var_404_to_fp16)[name = tensor<string, []>("op_405_cast_fp16")];
tensor<fp16, []> var_406_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_406_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_406_cast_fp16 = rsqrt(epsilon = var_406_epsilon_0_to_fp16, x = var_405_cast_fp16)[name = tensor<string, []>("op_406_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_13_cast_fp16 = mul(x = x_25_cast_fp16, y = var_406_cast_fp16)[name = tensor<string, []>("x_normed_13_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303710080)))];
tensor<fp16, [1, 4096, 1, 64]> input_11_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
tensor<int32, [2]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_420 = const()[name = tensor<string, []>("op_420"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_422_pad_type_0 = const()[name = tensor<string, []>("op_422_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_422_pad_0 = const()[name = tensor<string, []>("op_422_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 64]> var_422_cast_fp16 = conv(dilations = var_420, groups = var_246, pad = var_422_pad_0, pad_type = var_422_pad_type_0, strides = var_418, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_422_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303718336)))];
tensor<fp16, [1, 11008, 1, 64]> input_13_cast_fp16 = mul(x = var_422_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
tensor<int32, [2]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_428 = const()[name = tensor<string, []>("op_428"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_430_pad_type_0 = const()[name = tensor<string, []>("op_430_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_430_pad_0 = const()[name = tensor<string, []>("op_430_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 64]> var_430_cast_fp16 = conv(dilations = var_428, groups = var_246, pad = var_430_pad_0, pad_type = var_430_pad_type_0, strides = var_426, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_430_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303740416)))];
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_3_cast_fp16 = mul(x = var_430_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_3_cast_fp16")];
tensor<fp16, [1, 11008, 1, 64]> var_432_cast_fp16 = silu(x = input_13_cast_fp16)[name = tensor<string, []>("op_432_cast_fp16")];
tensor<fp16, [1, 11008, 1, 64]> input_15_cast_fp16 = mul(x = var_432_cast_fp16, y = x_fc_2_3_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
tensor<int32, [2]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_438 = const()[name = tensor<string, []>("op_438"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_440_pad_type_0 = const()[name = tensor<string, []>("op_440_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_440_pad_0 = const()[name = tensor<string, []>("op_440_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_440_cast_fp16 = conv(dilations = var_438, groups = var_246, pad = var_440_pad_0, pad_type = var_440_pad_type_0, strides = var_436, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("op_440_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303762496)))];
tensor<fp16, [1, 4096, 1, 64]> var_441_cast_fp16 = mul(x = var_440_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_29_cast_fp16 = add(x = var_441_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
tensor<int32, []> var_448 = const()[name = tensor<string, []>("op_448"), val = tensor<int32, []>(3)];
tensor<int32, []> var_453 = const()[name = tensor<string, []>("op_453"), val = tensor<int32, []>(-2)];
tensor<int32, []> var_455 = const()[name = tensor<string, []>("op_455"), val = tensor<int32, []>(-1)];
tensor<int32, []> var_462 = const()[name = tensor<string, []>("op_462"), val = tensor<int32, []>(1)];
tensor<bool, []> var_463 = const()[name = tensor<string, []>("op_463"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 4096, 1, 64]> var_470_cast_fp16 = mul(x = x_29_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("op_470_cast_fp16")];
tensor<int32, [1]> var_471 = const()[name = tensor<string, []>("op_471"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_9_cast_fp16 = reduce_mean(axes = var_471, keep_dims = var_463, x = var_470_cast_fp16)[name = tensor<string, []>("norm_x_9_cast_fp16")];
tensor<fp16, []> var_473_to_fp16 = const()[name = tensor<string, []>("op_473_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_474_cast_fp16 = add(x = norm_x_9_cast_fp16, y = var_473_to_fp16)[name = tensor<string, []>("op_474_cast_fp16")];
tensor<fp16, []> var_475_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_475_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_475_cast_fp16 = rsqrt(epsilon = var_475_epsilon_0_to_fp16, x = var_474_cast_fp16)[name = tensor<string, []>("op_475_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_17_cast_fp16 = mul(x = x_29_cast_fp16, y = var_475_cast_fp16)[name = tensor<string, []>("x_normed_17_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303770752)))];
tensor<fp16, [1, 4096, 1, 64]> x_33_cast_fp16 = mul(x = x_normed_17_cast_fp16, y = blocks_2_norm_1_weight_to_fp16)[name = tensor<string, []>("x_33_cast_fp16")];
tensor<int32, [2]> var_490 = const()[name = tensor<string, []>("op_490"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_492 = const()[name = tensor<string, []>("op_492"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_494_pad_type_0 = const()[name = tensor<string, []>("op_494_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_494_pad_0 = const()[name = tensor<string, []>("op_494_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_494_cast_fp16 = conv(dilations = var_492, groups = var_462, pad = var_494_pad_0, pad_type = var_494_pad_type_0, strides = var_490, weight = blocks_2_attn_q_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_494_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303779008)))];
tensor<fp16, [1, 4096, 1, 64]> q_13_cast_fp16 = mul(x = var_494_cast_fp16, y = blocks_2_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_13_cast_fp16")];
tensor<int32, [2]> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_500 = const()[name = tensor<string, []>("op_500"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_502_pad_type_0 = const()[name = tensor<string, []>("op_502_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_502_pad_0 = const()[name = tensor<string, []>("op_502_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_502_cast_fp16 = conv(dilations = var_500, groups = var_462, pad = var_502_pad_0, pad_type = var_502_pad_type_0, strides = var_498, weight = blocks_2_attn_k_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_502_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303787264)))];
|
322 |
+
tensor<fp16, [1, 4096, 1, 64]> k_17_cast_fp16 = mul(x = var_502_cast_fp16, y = blocks_2_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_17_cast_fp16")];
|
323 |
+
tensor<int32, [2]> var_506 = const()[name = tensor<string, []>("op_506"), val = tensor<int32, [2]>([1, 1])];
|
324 |
+
tensor<int32, [2]> var_508 = const()[name = tensor<string, []>("op_508"), val = tensor<int32, [2]>([1, 1])];
|
325 |
+
tensor<string, []> var_510_pad_type_0 = const()[name = tensor<string, []>("op_510_pad_type_0"), val = tensor<string, []>("custom")];
|
326 |
+
tensor<int32, [4]> var_510_pad_0 = const()[name = tensor<string, []>("op_510_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
327 |
+
tensor<fp16, [1, 4096, 1, 64]> var_510_cast_fp16 = conv(dilations = var_508, groups = var_462, pad = var_510_pad_0, pad_type = var_510_pad_type_0, strides = var_506, weight = blocks_2_attn_v_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_510_cast_fp16")];
|
328 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303795520)))];
|
329 |
+
tensor<fp16, [1, 4096, 1, 64]> v_13_cast_fp16 = mul(x = var_510_cast_fp16, y = blocks_2_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_13_cast_fp16")];
|
330 |
+
tensor<int32, [4]> var_512 = const()[name = tensor<string, []>("op_512"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
331 |
+
tensor<fp16, [1, 32, 128, 64]> q_15_cast_fp16 = reshape(shape = var_512, x = q_13_cast_fp16)[name = tensor<string, []>("q_15_cast_fp16")];
|
332 |
+
tensor<int32, [4]> var_514 = const()[name = tensor<string, []>("op_514"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
333 |
+
tensor<fp16, [1, 32, 128, 64]> k_19_cast_fp16 = reshape(shape = var_514, x = k_17_cast_fp16)[name = tensor<string, []>("k_19_cast_fp16")];
|
334 |
+
tensor<int32, [4]> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
335 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_2 = reshape(shape = var_516, x = v_13_cast_fp16)[name = tensor<string, []>("v_15_cast_fp16")];
|
336 |
+
tensor<int32, [4]> var_528_begin_0 = const()[name = tensor<string, []>("op_528_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
337 |
+
tensor<int32, [4]> var_528_end_0 = const()[name = tensor<string, []>("op_528_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
338 |
+
tensor<bool, [4]> var_528_end_mask_0 = const()[name = tensor<string, []>("op_528_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
339 |
+
tensor<fp16, [1, 32, 64, 64]> var_528_cast_fp16 = slice_by_index(begin = var_528_begin_0, end = var_528_end_0, end_mask = var_528_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_528_cast_fp16")];
|
340 |
+
tensor<int32, [4]> var_534_begin_0 = const()[name = tensor<string, []>("op_534_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
341 |
+
tensor<int32, [4]> var_534_end_0 = const()[name = tensor<string, []>("op_534_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
342 |
+
tensor<bool, [4]> var_534_end_mask_0 = const()[name = tensor<string, []>("op_534_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
343 |
+
tensor<fp16, [1, 32, 64, 64]> var_534_cast_fp16 = slice_by_index(begin = var_534_begin_0, end = var_534_end_0, end_mask = var_534_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_534_cast_fp16")];
|
344 |
+
tensor<fp16, []> const_17_promoted_to_fp16 = const()[name = tensor<string, []>("const_17_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
345 |
+
tensor<fp16, [1, 32, 64, 64]> var_536_cast_fp16 = mul(x = var_534_cast_fp16, y = const_17_promoted_to_fp16)[name = tensor<string, []>("op_536_cast_fp16")];
|
346 |
+
tensor<bool, []> rotated_9_interleave_0 = const()[name = tensor<string, []>("rotated_9_interleave_0"), val = tensor<bool, []>(false)];
|
347 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_9_cast_fp16 = concat(axis = var_453, interleave = rotated_9_interleave_0, values = (var_536_cast_fp16, var_528_cast_fp16))[name = tensor<string, []>("rotated_9_cast_fp16")];
|
348 |
+
tensor<fp16, [1, 32, 128, 64]> var_539_cast_fp16 = mul(x = q_15_cast_fp16, y = cos)[name = tensor<string, []>("op_539_cast_fp16")];
|
349 |
+
tensor<fp16, [1, 32, 128, 64]> var_540_cast_fp16 = mul(x = rotated_9_cast_fp16, y = sin)[name = tensor<string, []>("op_540_cast_fp16")];
|
350 |
+
tensor<fp16, [1, 32, 128, 64]> roped_9_cast_fp16 = add(x = var_539_cast_fp16, y = var_540_cast_fp16)[name = tensor<string, []>("roped_9_cast_fp16")];
|
351 |
+
tensor<int32, [4]> var_553_begin_0 = const()[name = tensor<string, []>("op_553_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
352 |
+
tensor<int32, [4]> var_553_end_0 = const()[name = tensor<string, []>("op_553_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
353 |
+
tensor<bool, [4]> var_553_end_mask_0 = const()[name = tensor<string, []>("op_553_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
354 |
+
tensor<fp16, [1, 32, 64, 64]> var_553_cast_fp16 = slice_by_index(begin = var_553_begin_0, end = var_553_end_0, end_mask = var_553_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_553_cast_fp16")];
|
355 |
+
tensor<int32, [4]> var_559_begin_0 = const()[name = tensor<string, []>("op_559_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
356 |
+
tensor<int32, [4]> var_559_end_0 = const()[name = tensor<string, []>("op_559_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
357 |
+
tensor<bool, [4]> var_559_end_mask_0 = const()[name = tensor<string, []>("op_559_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
358 |
+
tensor<fp16, [1, 32, 64, 64]> var_559_cast_fp16 = slice_by_index(begin = var_559_begin_0, end = var_559_end_0, end_mask = var_559_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_559_cast_fp16")];
|
359 |
+
tensor<fp16, []> const_19_promoted_to_fp16 = const()[name = tensor<string, []>("const_19_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
360 |
+
tensor<fp16, [1, 32, 64, 64]> var_561_cast_fp16 = mul(x = var_559_cast_fp16, y = const_19_promoted_to_fp16)[name = tensor<string, []>("op_561_cast_fp16")];
|
361 |
+
tensor<bool, []> rotated_interleave_0 = const()[name = tensor<string, []>("rotated_interleave_0"), val = tensor<bool, []>(false)];
|
362 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_cast_fp16 = concat(axis = var_453, interleave = rotated_interleave_0, values = (var_561_cast_fp16, var_553_cast_fp16))[name = tensor<string, []>("rotated_cast_fp16")];
|
363 |
+
tensor<fp16, [1, 32, 128, 64]> var_564_cast_fp16 = mul(x = k_19_cast_fp16, y = cos)[name = tensor<string, []>("op_564_cast_fp16")];
|
364 |
+
tensor<fp16, [1, 32, 128, 64]> var_565_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = tensor<string, []>("op_565_cast_fp16")];
|
365 |
+
tensor<fp16, [1, 32, 128, 64]> roped_cast_fp16 = add(x = var_564_cast_fp16, y = var_565_cast_fp16)[name = tensor<string, []>("roped_cast_fp16")];
|
366 |
+
tensor<bool, []> q_interleave_0 = const()[name = tensor<string, []>("q_interleave_0"), val = tensor<bool, []>(false)];
|
367 |
+
tensor<fp16, [1, 32, 128, 64]> q_cast_fp16 = concat(axis = var_453, interleave = q_interleave_0, values = roped_9_cast_fp16)[name = tensor<string, []>("q_cast_fp16")];
|
368 |
+
tensor<bool, []> k_21_interleave_0 = const()[name = tensor<string, []>("k_21_interleave_0"), val = tensor<bool, []>(false)];
|
369 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_2 = concat(axis = var_453, interleave = k_21_interleave_0, values = roped_cast_fp16)[name = tensor<string, []>("k_21_cast_fp16")];
|
370 |
+
tensor<bool, []> k_interleave_0 = const()[name = tensor<string, []>("k_interleave_0"), val = tensor<bool, []>(false)];
|
371 |
+
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_455, interleave = k_interleave_0, values = (k_cache_2, new_k_cache_2))[name = tensor<string, []>("k_cast_fp16")];
|
372 |
+
tensor<bool, []> v_interleave_0 = const()[name = tensor<string, []>("v_interleave_0"), val = tensor<bool, []>(false)];
|
373 |
+
tensor<fp16, [1, 32, 128, 512]> v_cast_fp16 = concat(axis = var_455, interleave = v_interleave_0, values = (v_cache_2, new_v_cache_2))[name = tensor<string, []>("v_cast_fp16")];
|
374 |
+
tensor<fp16, []> var_587_to_fp16 = const()[name = tensor<string, []>("op_587_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
375 |
+
tensor<fp16, [1, 32, 128, 64]> var_588_cast_fp16 = mul(x = q_cast_fp16, y = var_587_to_fp16)[name = tensor<string, []>("op_588_cast_fp16")];
|
376 |
+
tensor<bool, []> attn_weights_9_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_x_0"), val = tensor<bool, []>(true)];
|
377 |
+
tensor<bool, []> attn_weights_9_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_y_0"), val = tensor<bool, []>(false)];
|
378 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_9_cast_fp16 = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_588_cast_fp16, y = k_cast_fp16)[name = tensor<string, []>("attn_weights_9_cast_fp16")];
|
379 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_cast_fp16 = add(x = attn_weights_9_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_cast_fp16")];
|
380 |
+
tensor<fp16, [1, 32, 64, 512]> var_596_cast_fp16 = softmax(axis = var_448, x = attn_weights_cast_fp16)[name = tensor<string, []>("op_596_cast_fp16")];
|
381 |
+
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
|
382 |
+
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
|
383 |
+
tensor<fp16, [1, 32, 128, 64]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = v_cast_fp16, y = var_596_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
|
384 |
+
tensor<int32, [4]> var_600 = const()[name = tensor<string, []>("op_600"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
385 |
+
tensor<fp16, [1, 4096, 1, 64]> input_17_cast_fp16 = reshape(shape = var_600, x = attn_5_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
|
386 |
+
tensor<int32, [2]> var_604 = const()[name = tensor<string, []>("op_604"), val = tensor<int32, [2]>([1, 1])];
|
387 |
+
tensor<int32, [2]> var_606 = const()[name = tensor<string, []>("op_606"), val = tensor<int32, [2]>([1, 1])];
|
388 |
+
tensor<string, []> var_608_pad_type_0 = const()[name = tensor<string, []>("op_608_pad_type_0"), val = tensor<string, []>("custom")];
|
389 |
+
tensor<int32, [4]> var_608_pad_0 = const()[name = tensor<string, []>("op_608_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
390 |
+
tensor<fp16, [1, 4096, 1, 64]> var_608_cast_fp16 = conv(dilations = var_606, groups = var_462, pad = var_608_pad_0, pad_type = var_608_pad_type_0, strides = var_604, weight = blocks_2_attn_proj_weight_palettized_cast_fp16, x = input_17_cast_fp16)[name = tensor<string, []>("op_608_cast_fp16")];
|
391 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303803776)))];
|
392 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_cast_fp16 = mul(x = var_608_cast_fp16, y = blocks_2_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_cast_fp16")];
|
393 |
+
tensor<fp16, [1, 4096, 1, 64]> x_39_cast_fp16 = add(x = attention_output_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("x_39_cast_fp16")];
|
394 |
+
tensor<fp16, [1, 4096, 1, 64]> var_617_cast_fp16 = mul(x = x_39_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")];
|
395 |
+
tensor<int32, [1]> var_618 = const()[name = tensor<string, []>("op_618"), val = tensor<int32, [1]>([1])];
|
396 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_618, keep_dims = var_463, x = var_617_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
397 |
+
tensor<fp16, []> var_620_to_fp16 = const()[name = tensor<string, []>("op_620_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
398 |
+
tensor<fp16, [1, 1, 1, 64]> var_621_cast_fp16 = add(x = norm_x_cast_fp16, y = var_620_to_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
399 |
+
tensor<fp16, []> var_622_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_622_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
400 |
+
tensor<fp16, [1, 1, 1, 64]> var_622_cast_fp16 = rsqrt(epsilon = var_622_epsilon_0_to_fp16, x = var_621_cast_fp16)[name = tensor<string, []>("op_622_cast_fp16")];
|
401 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_21_cast_fp16 = mul(x = x_39_cast_fp16, y = var_622_cast_fp16)[name = tensor<string, []>("x_normed_21_cast_fp16")];
|
402 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303812032)))];
|
403 |
+
tensor<fp16, [1, 4096, 1, 64]> input_19_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_2_norm_2_weight_to_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
404 |
+
tensor<int32, [2]> var_634 = const()[name = tensor<string, []>("op_634"), val = tensor<int32, [2]>([1, 1])];
|
405 |
+
tensor<int32, [2]> var_636 = const()[name = tensor<string, []>("op_636"), val = tensor<int32, [2]>([1, 1])];
|
406 |
+
tensor<string, []> var_638_pad_type_0 = const()[name = tensor<string, []>("op_638_pad_type_0"), val = tensor<string, []>("custom")];
|
407 |
+
tensor<int32, [4]> var_638_pad_0 = const()[name = tensor<string, []>("op_638_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
408 |
+
tensor<fp16, [1, 11008, 1, 64]> var_638_cast_fp16 = conv(dilations = var_636, groups = var_462, pad = var_638_pad_0, pad_type = var_638_pad_type_0, strides = var_634, weight = blocks_2_mlp_fc_1_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303820288)))];
|
410 |
+
tensor<fp16, [1, 11008, 1, 64]> input_21_cast_fp16 = mul(x = var_638_cast_fp16, y = blocks_2_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
|
411 |
+
tensor<int32, [2]> var_642 = const()[name = tensor<string, []>("op_642"), val = tensor<int32, [2]>([1, 1])];
|
412 |
+
tensor<int32, [2]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [2]>([1, 1])];
|
413 |
+
tensor<string, []> var_646_pad_type_0 = const()[name = tensor<string, []>("op_646_pad_type_0"), val = tensor<string, []>("custom")];
|
414 |
+
tensor<int32, [4]> var_646_pad_0 = const()[name = tensor<string, []>("op_646_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
415 |
+
tensor<fp16, [1, 11008, 1, 64]> var_646_cast_fp16 = conv(dilations = var_644, groups = var_462, pad = var_646_pad_0, pad_type = var_646_pad_type_0, strides = var_642, weight = blocks_2_mlp_fc_2_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_646_cast_fp16")];
|
416 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303842368)))];
|
417 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_cast_fp16 = mul(x = var_646_cast_fp16, y = blocks_2_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 11008, 1, 64]> var_648_cast_fp16 = silu(x = input_21_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
419 |
+
tensor<fp16, [1, 11008, 1, 64]> input_cast_fp16 = mul(x = var_648_cast_fp16, y = x_fc_2_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
420 |
+
tensor<int32, [2]> var_652 = const()[name = tensor<string, []>("op_652"), val = tensor<int32, [2]>([1, 1])];
|
421 |
+
tensor<int32, [2]> var_654 = const()[name = tensor<string, []>("op_654"), val = tensor<int32, [2]>([1, 1])];
|
422 |
+
tensor<string, []> var_656_pad_type_0 = const()[name = tensor<string, []>("op_656_pad_type_0"), val = tensor<string, []>("custom")];
|
423 |
+
tensor<int32, [4]> var_656_pad_0 = const()[name = tensor<string, []>("op_656_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
424 |
+
tensor<fp16, [1, 4096, 1, 64]> var_656_cast_fp16 = conv(dilations = var_654, groups = var_462, pad = var_656_pad_0, pad_type = var_656_pad_type_0, strides = var_652, weight = blocks_2_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")];
|
425 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303864448)))];
|
426 |
+
tensor<fp16, [1, 4096, 1, 64]> var_657_cast_fp16 = mul(x = var_656_cast_fp16, y = blocks_2_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_657_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 4096, 1, 64]> new_x = add(x = var_657_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
428 |
+
} -> (new_x, new_k_cache_0, new_k_cache_1, new_k_cache_2, new_v_cache_0, new_v_cache_1, new_v_cache_2);
|
429 |
+
}
|
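Note on the attention math in the model.mil above: each block applies rotary position embeddings (RoPE) as two slice_by_index ops over the head dimension, a multiply by -1, a concat, and element-wise multiplies with the cos/sin inputs followed by an add. Below is a minimal NumPy sketch of that same computation, assuming the [1, 32, 128, 64] (batch, heads, head_dim, tokens) layout these chunks use; it is illustrative only and not part of the exported model.

```python
import numpy as np

def rope(q, cos, sin):
    # q   : [1, 32, 128, 64]  (batch, heads, head_dim, tokens)
    # cos : [128, 64]         broadcast over batch and heads
    # sin : [128, 64]
    first, second = q[:, :, :64, :], q[:, :, 64:, :]    # the two slice_by_index ops
    rotated = np.concatenate([-second, first], axis=2)   # mul(-1) + concat in the MIL
    return q * cos + rotated * sin                       # the final mul/mul/add sequence
```

The [128, 64] cos/sin tables broadcast cleanly against the [1, 32, 128, 64] query/key tensors, which is why the MIL can multiply them directly without an explicit tile or expand.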
Llama-2-7b-hf_chunk2.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d25138904c91ffd7e03563365ae012b5b126a2b75fc66880152e092e7680e211
|
3 |
+
size 303872704
|
Llama-2-7b-hf_chunk3.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3412284b024b899a736cd77112d4b1a4a5faa19d954259e925ef429f58bd886b
|
3 |
+
size 243
|
Llama-2-7b-hf_chunk3.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:589729b2995d8ca8246bbb5d92b910207bab816ad67282b0a285bcd2de77f80e
|
3 |
+
size 791
|
Llama-2-7b-hf_chunk3.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,218 @@
1 |
+
[
|
2 |
+
{
|
3 |
+
"metadataOutputVersion" : "3.0",
|
4 |
+
"storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
|
5 |
+
"outputSchema" : [
|
6 |
+
{
|
7 |
+
"hasShapeFlexibility" : "0",
|
8 |
+
"isOptional" : "0",
|
9 |
+
"dataType" : "Float16",
|
10 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
11 |
+
"shortDescription" : "",
|
12 |
+
"shape" : "[1, 4096, 1, 64]",
|
13 |
+
"name" : "new_x",
|
14 |
+
"type" : "MultiArray"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"hasShapeFlexibility" : "0",
|
18 |
+
"isOptional" : "0",
|
19 |
+
"dataType" : "Float16",
|
20 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
21 |
+
"shortDescription" : "",
|
22 |
+
"shape" : "[1, 32, 128, 64]",
|
23 |
+
"name" : "new_k_cache_0",
|
24 |
+
"type" : "MultiArray"
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"hasShapeFlexibility" : "0",
|
28 |
+
"isOptional" : "0",
|
29 |
+
"dataType" : "Float16",
|
30 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
31 |
+
"shortDescription" : "",
|
32 |
+
"shape" : "[1, 32, 128, 64]",
|
33 |
+
"name" : "new_k_cache_1",
|
34 |
+
"type" : "MultiArray"
|
35 |
+
},
|
36 |
+
{
|
37 |
+
"hasShapeFlexibility" : "0",
|
38 |
+
"isOptional" : "0",
|
39 |
+
"dataType" : "Float16",
|
40 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
41 |
+
"shortDescription" : "",
|
42 |
+
"shape" : "[1, 32, 128, 64]",
|
43 |
+
"name" : "new_k_cache_2",
|
44 |
+
"type" : "MultiArray"
|
45 |
+
},
|
46 |
+
{
|
47 |
+
"hasShapeFlexibility" : "0",
|
48 |
+
"isOptional" : "0",
|
49 |
+
"dataType" : "Float16",
|
50 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
51 |
+
"shortDescription" : "",
|
52 |
+
"shape" : "[1, 32, 128, 64]",
|
53 |
+
"name" : "new_v_cache_0",
|
54 |
+
"type" : "MultiArray"
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"hasShapeFlexibility" : "0",
|
58 |
+
"isOptional" : "0",
|
59 |
+
"dataType" : "Float16",
|
60 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
61 |
+
"shortDescription" : "",
|
62 |
+
"shape" : "[1, 32, 128, 64]",
|
63 |
+
"name" : "new_v_cache_1",
|
64 |
+
"type" : "MultiArray"
|
65 |
+
},
|
66 |
+
{
|
67 |
+
"hasShapeFlexibility" : "0",
|
68 |
+
"isOptional" : "0",
|
69 |
+
"dataType" : "Float16",
|
70 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
71 |
+
"shortDescription" : "",
|
72 |
+
"shape" : "[1, 32, 128, 64]",
|
73 |
+
"name" : "new_v_cache_2",
|
74 |
+
"type" : "MultiArray"
|
75 |
+
}
|
76 |
+
],
|
77 |
+
"modelParameters" : [
|
78 |
+
|
79 |
+
],
|
80 |
+
"specificationVersion" : 7,
|
81 |
+
"mlProgramOperationTypeHistogram" : {
|
82 |
+
"Concat" : 18,
|
83 |
+
"Ios16.rsqrt" : 6,
|
84 |
+
"Ios16.mul" : 63,
|
85 |
+
"SliceByIndex" : 12,
|
86 |
+
"Ios16.constexprLutToDense" : 21,
|
87 |
+
"Ios16.conv" : 21,
|
88 |
+
"Ios16.add" : 21,
|
89 |
+
"Ios16.reduceMean" : 6,
|
90 |
+
"Ios16.matmul" : 6,
|
91 |
+
"Ios16.softmax" : 3,
|
92 |
+
"Ios16.reshape" : 12,
|
93 |
+
"Ios16.silu" : 3
|
94 |
+
},
|
95 |
+
"computePrecision" : "Mixed (Float16, Int32)",
|
96 |
+
"isUpdatable" : "0",
|
97 |
+
"availability" : {
|
98 |
+
"macOS" : "13.0",
|
99 |
+
"tvOS" : "16.0",
|
100 |
+
"visionOS" : "1.0",
|
101 |
+
"watchOS" : "9.0",
|
102 |
+
"iOS" : "16.0",
|
103 |
+
"macCatalyst" : "16.0"
|
104 |
+
},
|
105 |
+
"modelType" : {
|
106 |
+
"name" : "MLModelType_mlProgram"
|
107 |
+
},
|
108 |
+
"userDefinedMetadata" : {
|
109 |
+
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
110 |
+
"com.github.apple.coremltools.source" : "torch==2.1.0",
|
111 |
+
"com.github.apple.coremltools.version" : "7.2"
|
112 |
+
},
|
113 |
+
"inputSchema" : [
|
114 |
+
{
|
115 |
+
"hasShapeFlexibility" : "0",
|
116 |
+
"isOptional" : "0",
|
117 |
+
"dataType" : "Float16",
|
118 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
119 |
+
"shortDescription" : "",
|
120 |
+
"shape" : "[1, 4096, 1, 64]",
|
121 |
+
"name" : "x",
|
122 |
+
"type" : "MultiArray"
|
123 |
+
},
|
124 |
+
{
|
125 |
+
"hasShapeFlexibility" : "0",
|
126 |
+
"isOptional" : "0",
|
127 |
+
"dataType" : "Float16",
|
128 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
129 |
+
"shortDescription" : "",
|
130 |
+
"shape" : "[128, 64]",
|
131 |
+
"name" : "cos",
|
132 |
+
"type" : "MultiArray"
|
133 |
+
},
|
134 |
+
{
|
135 |
+
"hasShapeFlexibility" : "0",
|
136 |
+
"isOptional" : "0",
|
137 |
+
"dataType" : "Float16",
|
138 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
139 |
+
"shortDescription" : "",
|
140 |
+
"shape" : "[128, 64]",
|
141 |
+
"name" : "sin",
|
142 |
+
"type" : "MultiArray"
|
143 |
+
},
|
144 |
+
{
|
145 |
+
"hasShapeFlexibility" : "0",
|
146 |
+
"isOptional" : "0",
|
147 |
+
"dataType" : "Float16",
|
148 |
+
"formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
|
149 |
+
"shortDescription" : "",
|
150 |
+
"shape" : "[1, 1, 64, 512]",
|
151 |
+
"name" : "mask",
|
152 |
+
"type" : "MultiArray"
|
153 |
+
},
|
154 |
+
{
|
155 |
+
"hasShapeFlexibility" : "0",
|
156 |
+
"isOptional" : "1",
|
157 |
+
"dataType" : "Float16",
|
158 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
159 |
+
"shortDescription" : "",
|
160 |
+
"shape" : "[1, 32, 128, 448]",
|
161 |
+
"name" : "k_cache_0",
|
162 |
+
"type" : "MultiArray"
|
163 |
+
},
|
164 |
+
{
|
165 |
+
"hasShapeFlexibility" : "0",
|
166 |
+
"isOptional" : "1",
|
167 |
+
"dataType" : "Float16",
|
168 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
169 |
+
"shortDescription" : "",
|
170 |
+
"shape" : "[1, 32, 128, 448]",
|
171 |
+
"name" : "v_cache_0",
|
172 |
+
"type" : "MultiArray"
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"hasShapeFlexibility" : "0",
|
176 |
+
"isOptional" : "1",
|
177 |
+
"dataType" : "Float16",
|
178 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
179 |
+
"shortDescription" : "",
|
180 |
+
"shape" : "[1, 32, 128, 448]",
|
181 |
+
"name" : "k_cache_1",
|
182 |
+
"type" : "MultiArray"
|
183 |
+
},
|
184 |
+
{
|
185 |
+
"hasShapeFlexibility" : "0",
|
186 |
+
"isOptional" : "1",
|
187 |
+
"dataType" : "Float16",
|
188 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
189 |
+
"shortDescription" : "",
|
190 |
+
"shape" : "[1, 32, 128, 448]",
|
191 |
+
"name" : "v_cache_1",
|
192 |
+
"type" : "MultiArray"
|
193 |
+
},
|
194 |
+
{
|
195 |
+
"hasShapeFlexibility" : "0",
|
196 |
+
"isOptional" : "1",
|
197 |
+
"dataType" : "Float16",
|
198 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
199 |
+
"shortDescription" : "",
|
200 |
+
"shape" : "[1, 32, 128, 448]",
|
201 |
+
"name" : "k_cache_2",
|
202 |
+
"type" : "MultiArray"
|
203 |
+
},
|
204 |
+
{
|
205 |
+
"hasShapeFlexibility" : "0",
|
206 |
+
"isOptional" : "1",
|
207 |
+
"dataType" : "Float16",
|
208 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
209 |
+
"shortDescription" : "",
|
210 |
+
"shape" : "[1, 32, 128, 448]",
|
211 |
+
"name" : "v_cache_2",
|
212 |
+
"type" : "MultiArray"
|
213 |
+
}
|
214 |
+
],
|
215 |
+
"generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk3",
|
216 |
+
"method" : "predict"
|
217 |
+
}
|
218 |
+
]
|
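The metadata.json above is the chunk's I/O contract: x, cos, sin, and mask are required Float16 arrays, the six k_cache_*/v_cache_* inputs are optional (the MIL declares a default value of 0 for them), and the outputs are new_x plus the per-block new_k_cache_*/new_v_cache_* tensors. The following is a minimal sketch of driving one compiled chunk with coremltools, assuming coremltools ≥ 7 on macOS; the zero-filled placeholder inputs are illustrative, not something shipped in this repo.

```python
import numpy as np
import coremltools as ct

# Load a compiled .mlmodelc chunk directly; the compute-unit choice is up to the caller.
model = ct.models.CompiledMLModel(
    "Llama-2-7b-hf_chunk3.mlmodelc", compute_units=ct.ComputeUnit.CPU_AND_NE
)

# Names, shapes, and dtypes come from the metadata.json above. Placeholder zeros
# stand in for the previous chunk's hidden states, the RoPE tables, and the causal mask.
inputs = {
    "x":    np.zeros((1, 4096, 1, 64), dtype=np.float16),
    "cos":  np.zeros((128, 64),        dtype=np.float16),
    "sin":  np.zeros((128, 64),        dtype=np.float16),
    "mask": np.zeros((1, 1, 64, 512),  dtype=np.float16),
    # k_cache_0..2 / v_cache_0..2 ([1, 32, 128, 448]) are optional and default to zeros.
}

outputs = model.predict(inputs)
print(outputs["new_x"].shape)  # (1, 4096, 1, 64)
```

Per the naming, new_x from one chunk is the x of the next, and each new_*_cache_* output (64 tokens wide) is concatenated after the corresponding 448-token cache input inside the model, which yields the [1, 32, 128, 512] attention window seen in the MIL.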
Llama-2-7b-hf_chunk3.mlmodelc/model.mil
ADDED
@@ -0,0 +1,429 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [128, 64]> cos, tensor<fp16, [1, 32, 128, 448]> k_cache_0, tensor<fp16, [1, 32, 128, 448]> k_cache_1, tensor<fp16, [1, 32, 128, 448]> k_cache_2, tensor<fp16, [1, 1, 64, 512]> mask, tensor<fp16, [128, 64]> sin, tensor<fp16, [1, 32, 128, 448]> v_cache_0, tensor<fp16, [1, 32, 128, 448]> v_cache_1, tensor<fp16, [1, 32, 128, 448]> v_cache_2, tensor<fp16, [1, 4096, 1, 64]> x) [CoreML_InputDefaultValues = dict<tensor<string, []>, tensor<fp32, []>>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"k_cache_2", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}, {"v_cache_2", 0}})] {
|
5 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388736))), name = tensor<string, []>("blocks_0_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
6 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388864))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777536))), name = tensor<string, []>("blocks_0_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
7 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777664))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166336))), name = tensor<string, []>("blocks_0_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
8 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166464))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555136))), name = tensor<string, []>("blocks_0_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
9 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555264))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099712))), name = tensor<string, []>("blocks_0_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
10 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644288))), name = tensor<string, []>("blocks_0_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
11 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644416))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188864))), name = tensor<string, []>("blocks_0_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
12 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188992))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577664))), name = tensor<string, []>("blocks_1_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
13 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577792))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966464))), name = tensor<string, []>("blocks_1_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
14 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966592))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355264))), name = tensor<string, []>("blocks_1_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
15 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355392))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744064))), name = tensor<string, []>("blocks_1_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
16 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744192))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288640))), name = tensor<string, []>("blocks_1_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
17 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288768))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833216))), name = tensor<string, []>("blocks_1_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
18 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833344))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377792))), name = tensor<string, []>("blocks_1_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
19 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377920))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766592))), name = tensor<string, []>("blocks_2_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
20 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766720))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155392))), name = tensor<string, []>("blocks_2_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
21 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155520))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544192))), name = tensor<string, []>("blocks_2_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
22 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544320))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235932992))), name = tensor<string, []>("blocks_2_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
23 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235933120))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477568))), name = tensor<string, []>("blocks_2_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
24 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477696))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022144))), name = tensor<string, []>("blocks_2_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
25 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_2_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022272))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566720))), name = tensor<string, []>("blocks_2_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
26 |
+
tensor<int32, []> var_18 = const()[name = tensor<string, []>("op_18"), val = tensor<int32, []>(3)];
|
27 |
+
tensor<int32, []> var_23 = const()[name = tensor<string, []>("op_23"), val = tensor<int32, []>(-2)];
|
28 |
+
tensor<int32, []> var_25 = const()[name = tensor<string, []>("op_25"), val = tensor<int32, []>(-1)];
|
29 |
+
tensor<int32, []> var_32 = const()[name = tensor<string, []>("op_32"), val = tensor<int32, []>(1)];
|
30 |
+
tensor<bool, []> var_33 = const()[name = tensor<string, []>("op_33"), val = tensor<bool, []>(true)];
|
31 |
+
tensor<fp16, [1, 4096, 1, 64]> var_41_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_41_cast_fp16")];
|
32 |
+
tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([1])];
|
33 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_1_cast_fp16 = reduce_mean(axes = var_42, keep_dims = var_33, x = var_41_cast_fp16)[name = tensor<string, []>("norm_x_1_cast_fp16")];
|
34 |
+
tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
35 |
+
tensor<fp16, [1, 1, 1, 64]> var_45_cast_fp16 = add(x = norm_x_1_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
|
36 |
+
tensor<fp16, []> var_46_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_46_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
37 |
+
tensor<fp16, [1, 1, 1, 64]> var_46_cast_fp16 = rsqrt(epsilon = var_46_epsilon_0_to_fp16, x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
|
38 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_46_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
|
39 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566848)))];
|
40 |
+
tensor<fp16, [1, 4096, 1, 64]> x_5_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
|
41 |
+
tensor<int32, [2]> var_58 = const()[name = tensor<string, []>("op_58"), val = tensor<int32, [2]>([1, 1])];
|
42 |
+
tensor<int32, [2]> var_60 = const()[name = tensor<string, []>("op_60"), val = tensor<int32, [2]>([1, 1])];
|
43 |
+
tensor<string, []> var_62_pad_type_0 = const()[name = tensor<string, []>("op_62_pad_type_0"), val = tensor<string, []>("custom")];
|
44 |
+
tensor<int32, [4]> var_62_pad_0 = const()[name = tensor<string, []>("op_62_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
45 |
+
tensor<fp16, [1, 4096, 1, 64]> var_62_cast_fp16 = conv(dilations = var_60, groups = var_32, pad = var_62_pad_0, pad_type = var_62_pad_type_0, strides = var_58, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
|
46 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303575104)))];
|
47 |
+
tensor<fp16, [1, 4096, 1, 64]> q_1_cast_fp16 = mul(x = var_62_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_1_cast_fp16")];
|
48 |
+
tensor<int32, [2]> var_66 = const()[name = tensor<string, []>("op_66"), val = tensor<int32, [2]>([1, 1])];
|
49 |
+
tensor<int32, [2]> var_68 = const()[name = tensor<string, []>("op_68"), val = tensor<int32, [2]>([1, 1])];
|
50 |
+
tensor<string, []> var_70_pad_type_0 = const()[name = tensor<string, []>("op_70_pad_type_0"), val = tensor<string, []>("custom")];
|
51 |
+
tensor<int32, [4]> var_70_pad_0 = const()[name = tensor<string, []>("op_70_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
52 |
+
tensor<fp16, [1, 4096, 1, 64]> var_70_cast_fp16 = conv(dilations = var_68, groups = var_32, pad = var_70_pad_0, pad_type = var_70_pad_type_0, strides = var_66, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_70_cast_fp16")];
|
53 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303583360)))];
|
54 |
+
tensor<fp16, [1, 4096, 1, 64]> k_1_cast_fp16 = mul(x = var_70_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_1_cast_fp16")];
|
55 |
+
tensor<int32, [2]> var_74 = const()[name = tensor<string, []>("op_74"), val = tensor<int32, [2]>([1, 1])];
|
56 |
+
tensor<int32, [2]> var_76 = const()[name = tensor<string, []>("op_76"), val = tensor<int32, [2]>([1, 1])];
|
57 |
+
tensor<string, []> var_78_pad_type_0 = const()[name = tensor<string, []>("op_78_pad_type_0"), val = tensor<string, []>("custom")];
|
58 |
+
tensor<int32, [4]> var_78_pad_0 = const()[name = tensor<string, []>("op_78_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
59 |
+
tensor<fp16, [1, 4096, 1, 64]> var_78_cast_fp16 = conv(dilations = var_76, groups = var_32, pad = var_78_pad_0, pad_type = var_78_pad_type_0, strides = var_74, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_78_cast_fp16")];
|
60 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303591616)))];
|
61 |
+
tensor<fp16, [1, 4096, 1, 64]> v_1_cast_fp16 = mul(x = var_78_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_1_cast_fp16")];
|
62 |
+
tensor<int32, [4]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
63 |
+
tensor<fp16, [1, 32, 128, 64]> q_3_cast_fp16 = reshape(shape = var_80, x = q_1_cast_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
|
64 |
+
tensor<int32, [4]> var_82 = const()[name = tensor<string, []>("op_82"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
65 |
+
tensor<fp16, [1, 32, 128, 64]> k_3_cast_fp16 = reshape(shape = var_82, x = k_1_cast_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
|
66 |
+
tensor<int32, [4]> var_84 = const()[name = tensor<string, []>("op_84"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
67 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_0 = reshape(shape = var_84, x = v_1_cast_fp16)[name = tensor<string, []>("v_3_cast_fp16")];
|
68 |
+
tensor<int32, [4]> var_96_begin_0 = const()[name = tensor<string, []>("op_96_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
69 |
+
tensor<int32, [4]> var_96_end_0 = const()[name = tensor<string, []>("op_96_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
70 |
+
tensor<bool, [4]> var_96_end_mask_0 = const()[name = tensor<string, []>("op_96_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
71 |
+
tensor<fp16, [1, 32, 64, 64]> var_96_cast_fp16 = slice_by_index(begin = var_96_begin_0, end = var_96_end_0, end_mask = var_96_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_96_cast_fp16")];
|
72 |
+
tensor<int32, [4]> var_102_begin_0 = const()[name = tensor<string, []>("op_102_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
73 |
+
tensor<int32, [4]> var_102_end_0 = const()[name = tensor<string, []>("op_102_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
74 |
+
tensor<bool, [4]> var_102_end_mask_0 = const()[name = tensor<string, []>("op_102_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
75 |
+
tensor<fp16, [1, 32, 64, 64]> var_102_cast_fp16 = slice_by_index(begin = var_102_begin_0, end = var_102_end_0, end_mask = var_102_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_102_cast_fp16")];
|
76 |
+
tensor<fp16, []> const_3_promoted_to_fp16 = const()[name = tensor<string, []>("const_3_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
77 |
+
tensor<fp16, [1, 32, 64, 64]> var_104_cast_fp16 = mul(x = var_102_cast_fp16, y = const_3_promoted_to_fp16)[name = tensor<string, []>("op_104_cast_fp16")];
|
78 |
+
tensor<bool, []> rotated_1_interleave_0 = const()[name = tensor<string, []>("rotated_1_interleave_0"), val = tensor<bool, []>(false)];
|
79 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_1_cast_fp16 = concat(axis = var_23, interleave = rotated_1_interleave_0, values = (var_104_cast_fp16, var_96_cast_fp16))[name = tensor<string, []>("rotated_1_cast_fp16")];
|
80 |
+
tensor<fp16, [1, 32, 128, 64]> var_107_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = tensor<string, []>("op_107_cast_fp16")];
|
81 |
+
tensor<fp16, [1, 32, 128, 64]> var_108_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = tensor<string, []>("op_108_cast_fp16")];
|
82 |
+
tensor<fp16, [1, 32, 128, 64]> roped_1_cast_fp16 = add(x = var_107_cast_fp16, y = var_108_cast_fp16)[name = tensor<string, []>("roped_1_cast_fp16")];
|
83 |
+
tensor<int32, [4]> var_121_begin_0 = const()[name = tensor<string, []>("op_121_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
84 |
+
tensor<int32, [4]> var_121_end_0 = const()[name = tensor<string, []>("op_121_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
85 |
+
tensor<bool, [4]> var_121_end_mask_0 = const()[name = tensor<string, []>("op_121_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
86 |
+
tensor<fp16, [1, 32, 64, 64]> var_121_cast_fp16 = slice_by_index(begin = var_121_begin_0, end = var_121_end_0, end_mask = var_121_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")];
|
87 |
+
tensor<int32, [4]> var_127_begin_0 = const()[name = tensor<string, []>("op_127_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
88 |
+
tensor<int32, [4]> var_127_end_0 = const()[name = tensor<string, []>("op_127_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
89 |
+
tensor<bool, [4]> var_127_end_mask_0 = const()[name = tensor<string, []>("op_127_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
90 |
+
tensor<fp16, [1, 32, 64, 64]> var_127_cast_fp16 = slice_by_index(begin = var_127_begin_0, end = var_127_end_0, end_mask = var_127_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_127_cast_fp16")];
|
91 |
+
tensor<fp16, []> const_5_promoted_to_fp16 = const()[name = tensor<string, []>("const_5_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
92 |
+
tensor<fp16, [1, 32, 64, 64]> var_129_cast_fp16 = mul(x = var_127_cast_fp16, y = const_5_promoted_to_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
|
93 |
+
tensor<bool, []> rotated_3_interleave_0 = const()[name = tensor<string, []>("rotated_3_interleave_0"), val = tensor<bool, []>(false)];
|
94 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_3_cast_fp16 = concat(axis = var_23, interleave = rotated_3_interleave_0, values = (var_129_cast_fp16, var_121_cast_fp16))[name = tensor<string, []>("rotated_3_cast_fp16")];
|
95 |
+
tensor<fp16, [1, 32, 128, 64]> var_132_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = tensor<string, []>("op_132_cast_fp16")];
|
96 |
+
tensor<fp16, [1, 32, 128, 64]> var_133_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = tensor<string, []>("op_133_cast_fp16")];
|
97 |
+
tensor<fp16, [1, 32, 128, 64]> roped_3_cast_fp16 = add(x = var_132_cast_fp16, y = var_133_cast_fp16)[name = tensor<string, []>("roped_3_cast_fp16")];
|
98 |
+
tensor<bool, []> q_5_interleave_0 = const()[name = tensor<string, []>("q_5_interleave_0"), val = tensor<bool, []>(false)];
|
99 |
+
tensor<fp16, [1, 32, 128, 64]> q_5_cast_fp16 = concat(axis = var_23, interleave = q_5_interleave_0, values = roped_1_cast_fp16)[name = tensor<string, []>("q_5_cast_fp16")];
|
100 |
+
tensor<bool, []> k_5_interleave_0 = const()[name = tensor<string, []>("k_5_interleave_0"), val = tensor<bool, []>(false)];
|
101 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_0 = concat(axis = var_23, interleave = k_5_interleave_0, values = roped_3_cast_fp16)[name = tensor<string, []>("k_5_cast_fp16")];
|
102 |
+
tensor<bool, []> k_7_interleave_0 = const()[name = tensor<string, []>("k_7_interleave_0"), val = tensor<bool, []>(false)];
|
103 |
+
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_25, interleave = k_7_interleave_0, values = (k_cache_0, new_k_cache_0))[name = tensor<string, []>("k_7_cast_fp16")];
|
104 |
+
tensor<bool, []> v_5_interleave_0 = const()[name = tensor<string, []>("v_5_interleave_0"), val = tensor<bool, []>(false)];
|
105 |
+
tensor<fp16, [1, 32, 128, 512]> v_5_cast_fp16 = concat(axis = var_25, interleave = v_5_interleave_0, values = (v_cache_0, new_v_cache_0))[name = tensor<string, []>("v_5_cast_fp16")];
|
106 |
+
tensor<fp16, []> var_155_to_fp16 = const()[name = tensor<string, []>("op_155_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
107 |
+
tensor<fp16, [1, 32, 128, 64]> var_156_cast_fp16 = mul(x = q_5_cast_fp16, y = var_155_to_fp16)[name = tensor<string, []>("op_156_cast_fp16")];
|
108 |
+
tensor<bool, []> attn_weights_1_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_0"), val = tensor<bool, []>(true)];
|
109 |
+
tensor<bool, []> attn_weights_1_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_0"), val = tensor<bool, []>(false)];
|
110 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_156_cast_fp16, y = k_7_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
|
111 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
|
112 |
+
tensor<fp16, [1, 32, 64, 512]> var_164_cast_fp16 = softmax(axis = var_18, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
|
113 |
+
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
|
114 |
+
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
|
115 |
+
tensor<fp16, [1, 32, 128, 64]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = v_5_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
|
116 |
+
tensor<int32, [4]> var_168 = const()[name = tensor<string, []>("op_168"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
117 |
+
tensor<fp16, [1, 4096, 1, 64]> input_1_cast_fp16 = reshape(shape = var_168, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
|
118 |
+
tensor<int32, [2]> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, [2]>([1, 1])];
|
119 |
+
tensor<int32, [2]> var_174 = const()[name = tensor<string, []>("op_174"), val = tensor<int32, [2]>([1, 1])];
|
120 |
+
tensor<string, []> var_176_pad_type_0 = const()[name = tensor<string, []>("op_176_pad_type_0"), val = tensor<string, []>("custom")];
|
121 |
+
tensor<int32, [4]> var_176_pad_0 = const()[name = tensor<string, []>("op_176_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
122 |
+
tensor<fp16, [1, 4096, 1, 64]> var_176_cast_fp16 = conv(dilations = var_174, groups = var_32, pad = var_176_pad_0, pad_type = var_176_pad_type_0, strides = var_172, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
|
123 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303599872)))];
|
124 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_1_cast_fp16 = mul(x = var_176_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_1_cast_fp16")];
|
125 |
+
tensor<fp16, [1, 4096, 1, 64]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = tensor<string, []>("x_11_cast_fp16")];
|
126 |
+
tensor<fp16, [1, 4096, 1, 64]> var_185_cast_fp16 = mul(x = x_11_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("op_185_cast_fp16")];
|
127 |
+
tensor<int32, [1]> var_186 = const()[name = tensor<string, []>("op_186"), val = tensor<int32, [1]>([1])];
|
128 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_3_cast_fp16 = reduce_mean(axes = var_186, keep_dims = var_33, x = var_185_cast_fp16)[name = tensor<string, []>("norm_x_3_cast_fp16")];
|
129 |
+
tensor<fp16, []> var_188_to_fp16 = const()[name = tensor<string, []>("op_188_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
130 |
+
tensor<fp16, [1, 1, 1, 64]> var_189_cast_fp16 = add(x = norm_x_3_cast_fp16, y = var_188_to_fp16)[name = tensor<string, []>("op_189_cast_fp16")];
|
131 |
+
tensor<fp16, []> var_190_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_190_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
132 |
+
tensor<fp16, [1, 1, 1, 64]> var_190_cast_fp16 = rsqrt(epsilon = var_190_epsilon_0_to_fp16, x = var_189_cast_fp16)[name = tensor<string, []>("op_190_cast_fp16")];
|
133 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_5_cast_fp16 = mul(x = x_11_cast_fp16, y = var_190_cast_fp16)[name = tensor<string, []>("x_normed_5_cast_fp16")];
|
134 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303608128)))];
|
135 |
+
tensor<fp16, [1, 4096, 1, 64]> input_3_cast_fp16 = mul(x = x_normed_5_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
136 |
+
tensor<int32, [2]> var_202 = const()[name = tensor<string, []>("op_202"), val = tensor<int32, [2]>([1, 1])];
|
137 |
+
tensor<int32, [2]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [2]>([1, 1])];
|
138 |
+
tensor<string, []> var_206_pad_type_0 = const()[name = tensor<string, []>("op_206_pad_type_0"), val = tensor<string, []>("custom")];
|
139 |
+
tensor<int32, [4]> var_206_pad_0 = const()[name = tensor<string, []>("op_206_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
140 |
+
tensor<fp16, [1, 11008, 1, 64]> var_206_cast_fp16 = conv(dilations = var_204, groups = var_32, pad = var_206_pad_0, pad_type = var_206_pad_type_0, strides = var_202, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_206_cast_fp16")];
|
141 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303616384)))];
|
142 |
+
tensor<fp16, [1, 11008, 1, 64]> input_5_cast_fp16 = mul(x = var_206_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
143 |
+
tensor<int32, [2]> var_210 = const()[name = tensor<string, []>("op_210"), val = tensor<int32, [2]>([1, 1])];
|
144 |
+
tensor<int32, [2]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [2]>([1, 1])];
|
145 |
+
tensor<string, []> var_214_pad_type_0 = const()[name = tensor<string, []>("op_214_pad_type_0"), val = tensor<string, []>("custom")];
|
146 |
+
tensor<int32, [4]> var_214_pad_0 = const()[name = tensor<string, []>("op_214_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
147 |
+
tensor<fp16, [1, 11008, 1, 64]> var_214_cast_fp16 = conv(dilations = var_212, groups = var_32, pad = var_214_pad_0, pad_type = var_214_pad_type_0, strides = var_210, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")];
|
148 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303638464)))];
|
149 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_1_cast_fp16 = mul(x = var_214_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_1_cast_fp16")];
|
150 |
+
tensor<fp16, [1, 11008, 1, 64]> var_216_cast_fp16 = silu(x = input_5_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
|
151 |
+
tensor<fp16, [1, 11008, 1, 64]> input_7_cast_fp16 = mul(x = var_216_cast_fp16, y = x_fc_2_1_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
152 |
+
tensor<int32, [2]> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<int32, [2]>([1, 1])];
|
153 |
+
tensor<int32, [2]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [2]>([1, 1])];
|
154 |
+
tensor<string, []> var_224_pad_type_0 = const()[name = tensor<string, []>("op_224_pad_type_0"), val = tensor<string, []>("custom")];
|
155 |
+
tensor<int32, [4]> var_224_pad_0 = const()[name = tensor<string, []>("op_224_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
156 |
+
tensor<fp16, [1, 4096, 1, 64]> var_224_cast_fp16 = conv(dilations = var_222, groups = var_32, pad = var_224_pad_0, pad_type = var_224_pad_type_0, strides = var_220, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("op_224_cast_fp16")];
|
157 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303660544)))];
|
158 |
+
tensor<fp16, [1, 4096, 1, 64]> var_225_cast_fp16 = mul(x = var_224_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_225_cast_fp16")];
|
159 |
+
tensor<fp16, [1, 4096, 1, 64]> x_15_cast_fp16 = add(x = var_225_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("x_15_cast_fp16")];
|
160 |
+
tensor<int32, []> var_232 = const()[name = tensor<string, []>("op_232"), val = tensor<int32, []>(3)];
|
161 |
+
tensor<int32, []> var_237 = const()[name = tensor<string, []>("op_237"), val = tensor<int32, []>(-2)];
|
162 |
+
tensor<int32, []> var_239 = const()[name = tensor<string, []>("op_239"), val = tensor<int32, []>(-1)];
|
163 |
+
tensor<int32, []> var_246 = const()[name = tensor<string, []>("op_246"), val = tensor<int32, []>(1)];
|
164 |
+
tensor<bool, []> var_247 = const()[name = tensor<string, []>("op_247"), val = tensor<bool, []>(true)];
|
165 |
+
tensor<fp16, [1, 4096, 1, 64]> var_254_cast_fp16 = mul(x = x_15_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("op_254_cast_fp16")];
|
166 |
+
tensor<int32, [1]> var_255 = const()[name = tensor<string, []>("op_255"), val = tensor<int32, [1]>([1])];
|
167 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_5_cast_fp16 = reduce_mean(axes = var_255, keep_dims = var_247, x = var_254_cast_fp16)[name = tensor<string, []>("norm_x_5_cast_fp16")];
|
168 |
+
tensor<fp16, []> var_257_to_fp16 = const()[name = tensor<string, []>("op_257_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
169 |
+
tensor<fp16, [1, 1, 1, 64]> var_258_cast_fp16 = add(x = norm_x_5_cast_fp16, y = var_257_to_fp16)[name = tensor<string, []>("op_258_cast_fp16")];
|
170 |
+
tensor<fp16, []> var_259_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_259_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
171 |
+
tensor<fp16, [1, 1, 1, 64]> var_259_cast_fp16 = rsqrt(epsilon = var_259_epsilon_0_to_fp16, x = var_258_cast_fp16)[name = tensor<string, []>("op_259_cast_fp16")];
|
172 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_9_cast_fp16 = mul(x = x_15_cast_fp16, y = var_259_cast_fp16)[name = tensor<string, []>("x_normed_9_cast_fp16")];
|
173 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303668800)))];
|
174 |
+
tensor<fp16, [1, 4096, 1, 64]> x_19_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
|
175 |
+
tensor<int32, [2]> var_274 = const()[name = tensor<string, []>("op_274"), val = tensor<int32, [2]>([1, 1])];
|
176 |
+
tensor<int32, [2]> var_276 = const()[name = tensor<string, []>("op_276"), val = tensor<int32, [2]>([1, 1])];
|
177 |
+
tensor<string, []> var_278_pad_type_0 = const()[name = tensor<string, []>("op_278_pad_type_0"), val = tensor<string, []>("custom")];
|
178 |
+
tensor<int32, [4]> var_278_pad_0 = const()[name = tensor<string, []>("op_278_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
179 |
+
tensor<fp16, [1, 4096, 1, 64]> var_278_cast_fp16 = conv(dilations = var_276, groups = var_246, pad = var_278_pad_0, pad_type = var_278_pad_type_0, strides = var_274, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_278_cast_fp16")];
|
180 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303677056)))];
|
181 |
+
tensor<fp16, [1, 4096, 1, 64]> q_7_cast_fp16 = mul(x = var_278_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
|
182 |
+
tensor<int32, [2]> var_282 = const()[name = tensor<string, []>("op_282"), val = tensor<int32, [2]>([1, 1])];
|
183 |
+
tensor<int32, [2]> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, [2]>([1, 1])];
|
184 |
+
tensor<string, []> var_286_pad_type_0 = const()[name = tensor<string, []>("op_286_pad_type_0"), val = tensor<string, []>("custom")];
|
185 |
+
tensor<int32, [4]> var_286_pad_0 = const()[name = tensor<string, []>("op_286_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
186 |
+
tensor<fp16, [1, 4096, 1, 64]> var_286_cast_fp16 = conv(dilations = var_284, groups = var_246, pad = var_286_pad_0, pad_type = var_286_pad_type_0, strides = var_282, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_286_cast_fp16")];
|
187 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303685312)))];
|
188 |
+
tensor<fp16, [1, 4096, 1, 64]> k_9_cast_fp16 = mul(x = var_286_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_9_cast_fp16")];
|
189 |
+
tensor<int32, [2]> var_290 = const()[name = tensor<string, []>("op_290"), val = tensor<int32, [2]>([1, 1])];
|
190 |
+
tensor<int32, [2]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [2]>([1, 1])];
|
191 |
+
tensor<string, []> var_294_pad_type_0 = const()[name = tensor<string, []>("op_294_pad_type_0"), val = tensor<string, []>("custom")];
|
192 |
+
tensor<int32, [4]> var_294_pad_0 = const()[name = tensor<string, []>("op_294_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
193 |
+
tensor<fp16, [1, 4096, 1, 64]> var_294_cast_fp16 = conv(dilations = var_292, groups = var_246, pad = var_294_pad_0, pad_type = var_294_pad_type_0, strides = var_290, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_294_cast_fp16")];
|
194 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303693568)))];
|
195 |
+
tensor<fp16, [1, 4096, 1, 64]> v_7_cast_fp16 = mul(x = var_294_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_7_cast_fp16")];
|
196 |
+
tensor<int32, [4]> var_296 = const()[name = tensor<string, []>("op_296"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
197 |
+
tensor<fp16, [1, 32, 128, 64]> q_9_cast_fp16 = reshape(shape = var_296, x = q_7_cast_fp16)[name = tensor<string, []>("q_9_cast_fp16")];
|
198 |
+
tensor<int32, [4]> var_298 = const()[name = tensor<string, []>("op_298"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
199 |
+
tensor<fp16, [1, 32, 128, 64]> k_11_cast_fp16 = reshape(shape = var_298, x = k_9_cast_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
|
200 |
+
tensor<int32, [4]> var_300 = const()[name = tensor<string, []>("op_300"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
201 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_1 = reshape(shape = var_300, x = v_7_cast_fp16)[name = tensor<string, []>("v_9_cast_fp16")];
|
202 |
+
tensor<int32, [4]> var_312_begin_0 = const()[name = tensor<string, []>("op_312_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
203 |
+
tensor<int32, [4]> var_312_end_0 = const()[name = tensor<string, []>("op_312_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
204 |
+
tensor<bool, [4]> var_312_end_mask_0 = const()[name = tensor<string, []>("op_312_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
205 |
+
tensor<fp16, [1, 32, 64, 64]> var_312_cast_fp16 = slice_by_index(begin = var_312_begin_0, end = var_312_end_0, end_mask = var_312_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_312_cast_fp16")];
|
206 |
+
tensor<int32, [4]> var_318_begin_0 = const()[name = tensor<string, []>("op_318_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
207 |
+
tensor<int32, [4]> var_318_end_0 = const()[name = tensor<string, []>("op_318_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
208 |
+
tensor<bool, [4]> var_318_end_mask_0 = const()[name = tensor<string, []>("op_318_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
209 |
+
tensor<fp16, [1, 32, 64, 64]> var_318_cast_fp16 = slice_by_index(begin = var_318_begin_0, end = var_318_end_0, end_mask = var_318_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_318_cast_fp16")];
|
210 |
+
tensor<fp16, []> const_10_promoted_to_fp16 = const()[name = tensor<string, []>("const_10_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
211 |
+
tensor<fp16, [1, 32, 64, 64]> var_320_cast_fp16 = mul(x = var_318_cast_fp16, y = const_10_promoted_to_fp16)[name = tensor<string, []>("op_320_cast_fp16")];
|
212 |
+
tensor<bool, []> rotated_5_interleave_0 = const()[name = tensor<string, []>("rotated_5_interleave_0"), val = tensor<bool, []>(false)];
|
213 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_5_cast_fp16 = concat(axis = var_237, interleave = rotated_5_interleave_0, values = (var_320_cast_fp16, var_312_cast_fp16))[name = tensor<string, []>("rotated_5_cast_fp16")];
|
214 |
+
tensor<fp16, [1, 32, 128, 64]> var_323_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = tensor<string, []>("op_323_cast_fp16")];
|
215 |
+
tensor<fp16, [1, 32, 128, 64]> var_324_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = tensor<string, []>("op_324_cast_fp16")];
|
216 |
+
tensor<fp16, [1, 32, 128, 64]> roped_5_cast_fp16 = add(x = var_323_cast_fp16, y = var_324_cast_fp16)[name = tensor<string, []>("roped_5_cast_fp16")];
|
217 |
+
tensor<int32, [4]> var_337_begin_0 = const()[name = tensor<string, []>("op_337_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
218 |
+
tensor<int32, [4]> var_337_end_0 = const()[name = tensor<string, []>("op_337_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
219 |
+
tensor<bool, [4]> var_337_end_mask_0 = const()[name = tensor<string, []>("op_337_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
220 |
+
tensor<fp16, [1, 32, 64, 64]> var_337_cast_fp16 = slice_by_index(begin = var_337_begin_0, end = var_337_end_0, end_mask = var_337_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_337_cast_fp16")];
|
221 |
+
tensor<int32, [4]> var_343_begin_0 = const()[name = tensor<string, []>("op_343_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
222 |
+
tensor<int32, [4]> var_343_end_0 = const()[name = tensor<string, []>("op_343_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
223 |
+
tensor<bool, [4]> var_343_end_mask_0 = const()[name = tensor<string, []>("op_343_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
224 |
+
tensor<fp16, [1, 32, 64, 64]> var_343_cast_fp16 = slice_by_index(begin = var_343_begin_0, end = var_343_end_0, end_mask = var_343_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_343_cast_fp16")];
|
225 |
+
tensor<fp16, []> const_12_promoted_to_fp16 = const()[name = tensor<string, []>("const_12_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
226 |
+
tensor<fp16, [1, 32, 64, 64]> var_345_cast_fp16 = mul(x = var_343_cast_fp16, y = const_12_promoted_to_fp16)[name = tensor<string, []>("op_345_cast_fp16")];
|
227 |
+
tensor<bool, []> rotated_7_interleave_0 = const()[name = tensor<string, []>("rotated_7_interleave_0"), val = tensor<bool, []>(false)];
|
228 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_7_cast_fp16 = concat(axis = var_237, interleave = rotated_7_interleave_0, values = (var_345_cast_fp16, var_337_cast_fp16))[name = tensor<string, []>("rotated_7_cast_fp16")];
|
229 |
+
tensor<fp16, [1, 32, 128, 64]> var_348_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = tensor<string, []>("op_348_cast_fp16")];
|
230 |
+
tensor<fp16, [1, 32, 128, 64]> var_349_cast_fp16 = mul(x = rotated_7_cast_fp16, y = sin)[name = tensor<string, []>("op_349_cast_fp16")];
|
231 |
+
tensor<fp16, [1, 32, 128, 64]> roped_7_cast_fp16 = add(x = var_348_cast_fp16, y = var_349_cast_fp16)[name = tensor<string, []>("roped_7_cast_fp16")];
|
232 |
+
tensor<bool, []> q_11_interleave_0 = const()[name = tensor<string, []>("q_11_interleave_0"), val = tensor<bool, []>(false)];
|
233 |
+
tensor<fp16, [1, 32, 128, 64]> q_11_cast_fp16 = concat(axis = var_237, interleave = q_11_interleave_0, values = roped_5_cast_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
|
234 |
+
tensor<bool, []> k_13_interleave_0 = const()[name = tensor<string, []>("k_13_interleave_0"), val = tensor<bool, []>(false)];
|
235 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_1 = concat(axis = var_237, interleave = k_13_interleave_0, values = roped_7_cast_fp16)[name = tensor<string, []>("k_13_cast_fp16")];
|
236 |
+
tensor<bool, []> k_15_interleave_0 = const()[name = tensor<string, []>("k_15_interleave_0"), val = tensor<bool, []>(false)];
|
237 |
+
tensor<fp16, [1, 32, 128, 512]> k_15_cast_fp16 = concat(axis = var_239, interleave = k_15_interleave_0, values = (k_cache_1, new_k_cache_1))[name = tensor<string, []>("k_15_cast_fp16")];
|
238 |
+
tensor<bool, []> v_11_interleave_0 = const()[name = tensor<string, []>("v_11_interleave_0"), val = tensor<bool, []>(false)];
|
239 |
+
tensor<fp16, [1, 32, 128, 512]> v_11_cast_fp16 = concat(axis = var_239, interleave = v_11_interleave_0, values = (v_cache_1, new_v_cache_1))[name = tensor<string, []>("v_11_cast_fp16")];
|
240 |
+
tensor<fp16, []> var_371_to_fp16 = const()[name = tensor<string, []>("op_371_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
241 |
+
tensor<fp16, [1, 32, 128, 64]> var_372_cast_fp16 = mul(x = q_11_cast_fp16, y = var_371_to_fp16)[name = tensor<string, []>("op_372_cast_fp16")];
|
242 |
+
tensor<bool, []> attn_weights_5_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_x_0"), val = tensor<bool, []>(true)];
|
243 |
+
tensor<bool, []> attn_weights_5_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_y_0"), val = tensor<bool, []>(false)];
|
244 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_5_cast_fp16 = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_372_cast_fp16, y = k_15_cast_fp16)[name = tensor<string, []>("attn_weights_5_cast_fp16")];
|
245 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_7_cast_fp16 = add(x = attn_weights_5_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_7_cast_fp16")];
|
246 |
+
tensor<fp16, [1, 32, 64, 512]> var_380_cast_fp16 = softmax(axis = var_232, x = attn_weights_7_cast_fp16)[name = tensor<string, []>("op_380_cast_fp16")];
|
247 |
+
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
|
248 |
+
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
|
249 |
+
tensor<fp16, [1, 32, 128, 64]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = v_11_cast_fp16, y = var_380_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
|
250 |
+
tensor<int32, [4]> var_384 = const()[name = tensor<string, []>("op_384"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
251 |
+
tensor<fp16, [1, 4096, 1, 64]> input_9_cast_fp16 = reshape(shape = var_384, x = attn_3_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
|
252 |
+
tensor<int32, [2]> var_388 = const()[name = tensor<string, []>("op_388"), val = tensor<int32, [2]>([1, 1])];
|
253 |
+
tensor<int32, [2]> var_390 = const()[name = tensor<string, []>("op_390"), val = tensor<int32, [2]>([1, 1])];
|
254 |
+
tensor<string, []> var_392_pad_type_0 = const()[name = tensor<string, []>("op_392_pad_type_0"), val = tensor<string, []>("custom")];
|
255 |
+
tensor<int32, [4]> var_392_pad_0 = const()[name = tensor<string, []>("op_392_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
256 |
+
tensor<fp16, [1, 4096, 1, 64]> var_392_cast_fp16 = conv(dilations = var_390, groups = var_246, pad = var_392_pad_0, pad_type = var_392_pad_type_0, strides = var_388, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("op_392_cast_fp16")];
|
257 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303701824)))];
|
258 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_3_cast_fp16 = mul(x = var_392_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_3_cast_fp16")];
|
259 |
+
tensor<fp16, [1, 4096, 1, 64]> x_25_cast_fp16 = add(x = attention_output_3_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
|
260 |
+
tensor<fp16, [1, 4096, 1, 64]> var_401_cast_fp16 = mul(x = x_25_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_401_cast_fp16")];
|
261 |
+
tensor<int32, [1]> var_402 = const()[name = tensor<string, []>("op_402"), val = tensor<int32, [1]>([1])];
|
262 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_7_cast_fp16 = reduce_mean(axes = var_402, keep_dims = var_247, x = var_401_cast_fp16)[name = tensor<string, []>("norm_x_7_cast_fp16")];
|
263 |
+
tensor<fp16, []> var_404_to_fp16 = const()[name = tensor<string, []>("op_404_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
264 |
+
tensor<fp16, [1, 1, 1, 64]> var_405_cast_fp16 = add(x = norm_x_7_cast_fp16, y = var_404_to_fp16)[name = tensor<string, []>("op_405_cast_fp16")];
|
265 |
+
tensor<fp16, []> var_406_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_406_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
266 |
+
tensor<fp16, [1, 1, 1, 64]> var_406_cast_fp16 = rsqrt(epsilon = var_406_epsilon_0_to_fp16, x = var_405_cast_fp16)[name = tensor<string, []>("op_406_cast_fp16")];
|
267 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_13_cast_fp16 = mul(x = x_25_cast_fp16, y = var_406_cast_fp16)[name = tensor<string, []>("x_normed_13_cast_fp16")];
|
268 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303710080)))];
|
269 |
+
tensor<fp16, [1, 4096, 1, 64]> input_11_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
270 |
+
tensor<int32, [2]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [2]>([1, 1])];
|
271 |
+
tensor<int32, [2]> var_420 = const()[name = tensor<string, []>("op_420"), val = tensor<int32, [2]>([1, 1])];
|
272 |
+
tensor<string, []> var_422_pad_type_0 = const()[name = tensor<string, []>("op_422_pad_type_0"), val = tensor<string, []>("custom")];
|
273 |
+
tensor<int32, [4]> var_422_pad_0 = const()[name = tensor<string, []>("op_422_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
274 |
+
tensor<fp16, [1, 11008, 1, 64]> var_422_cast_fp16 = conv(dilations = var_420, groups = var_246, pad = var_422_pad_0, pad_type = var_422_pad_type_0, strides = var_418, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_422_cast_fp16")];
|
275 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303718336)))];
|
276 |
+
tensor<fp16, [1, 11008, 1, 64]> input_13_cast_fp16 = mul(x = var_422_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
277 |
+
tensor<int32, [2]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [2]>([1, 1])];
|
278 |
+
tensor<int32, [2]> var_428 = const()[name = tensor<string, []>("op_428"), val = tensor<int32, [2]>([1, 1])];
|
279 |
+
tensor<string, []> var_430_pad_type_0 = const()[name = tensor<string, []>("op_430_pad_type_0"), val = tensor<string, []>("custom")];
|
280 |
+
tensor<int32, [4]> var_430_pad_0 = const()[name = tensor<string, []>("op_430_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
281 |
+
tensor<fp16, [1, 11008, 1, 64]> var_430_cast_fp16 = conv(dilations = var_428, groups = var_246, pad = var_430_pad_0, pad_type = var_430_pad_type_0, strides = var_426, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_430_cast_fp16")];
|
282 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303740416)))];
|
283 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_3_cast_fp16 = mul(x = var_430_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_3_cast_fp16")];
|
284 |
+
tensor<fp16, [1, 11008, 1, 64]> var_432_cast_fp16 = silu(x = input_13_cast_fp16)[name = tensor<string, []>("op_432_cast_fp16")];
|
285 |
+
tensor<fp16, [1, 11008, 1, 64]> input_15_cast_fp16 = mul(x = var_432_cast_fp16, y = x_fc_2_3_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
|
286 |
+
tensor<int32, [2]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [2]>([1, 1])];
|
287 |
+
tensor<int32, [2]> var_438 = const()[name = tensor<string, []>("op_438"), val = tensor<int32, [2]>([1, 1])];
|
288 |
+
tensor<string, []> var_440_pad_type_0 = const()[name = tensor<string, []>("op_440_pad_type_0"), val = tensor<string, []>("custom")];
|
289 |
+
tensor<int32, [4]> var_440_pad_0 = const()[name = tensor<string, []>("op_440_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
290 |
+
tensor<fp16, [1, 4096, 1, 64]> var_440_cast_fp16 = conv(dilations = var_438, groups = var_246, pad = var_440_pad_0, pad_type = var_440_pad_type_0, strides = var_436, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("op_440_cast_fp16")];
|
291 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303762496)))];
|
292 |
+
tensor<fp16, [1, 4096, 1, 64]> var_441_cast_fp16 = mul(x = var_440_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
|
293 |
+
tensor<fp16, [1, 4096, 1, 64]> x_29_cast_fp16 = add(x = var_441_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
|
294 |
+
tensor<int32, []> var_448 = const()[name = tensor<string, []>("op_448"), val = tensor<int32, []>(3)];
|
295 |
+
tensor<int32, []> var_453 = const()[name = tensor<string, []>("op_453"), val = tensor<int32, []>(-2)];
|
296 |
+
tensor<int32, []> var_455 = const()[name = tensor<string, []>("op_455"), val = tensor<int32, []>(-1)];
|
297 |
+
tensor<int32, []> var_462 = const()[name = tensor<string, []>("op_462"), val = tensor<int32, []>(1)];
|
298 |
+
tensor<bool, []> var_463 = const()[name = tensor<string, []>("op_463"), val = tensor<bool, []>(true)];
|
299 |
+
tensor<fp16, [1, 4096, 1, 64]> var_470_cast_fp16 = mul(x = x_29_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("op_470_cast_fp16")];
|
300 |
+
tensor<int32, [1]> var_471 = const()[name = tensor<string, []>("op_471"), val = tensor<int32, [1]>([1])];
|
301 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_9_cast_fp16 = reduce_mean(axes = var_471, keep_dims = var_463, x = var_470_cast_fp16)[name = tensor<string, []>("norm_x_9_cast_fp16")];
|
302 |
+
tensor<fp16, []> var_473_to_fp16 = const()[name = tensor<string, []>("op_473_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
303 |
+
tensor<fp16, [1, 1, 1, 64]> var_474_cast_fp16 = add(x = norm_x_9_cast_fp16, y = var_473_to_fp16)[name = tensor<string, []>("op_474_cast_fp16")];
|
304 |
+
tensor<fp16, []> var_475_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_475_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
305 |
+
tensor<fp16, [1, 1, 1, 64]> var_475_cast_fp16 = rsqrt(epsilon = var_475_epsilon_0_to_fp16, x = var_474_cast_fp16)[name = tensor<string, []>("op_475_cast_fp16")];
|
306 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_17_cast_fp16 = mul(x = x_29_cast_fp16, y = var_475_cast_fp16)[name = tensor<string, []>("x_normed_17_cast_fp16")];
|
307 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303770752)))];
|
308 |
+
tensor<fp16, [1, 4096, 1, 64]> x_33_cast_fp16 = mul(x = x_normed_17_cast_fp16, y = blocks_2_norm_1_weight_to_fp16)[name = tensor<string, []>("x_33_cast_fp16")];
|
309 |
+
tensor<int32, [2]> var_490 = const()[name = tensor<string, []>("op_490"), val = tensor<int32, [2]>([1, 1])];
|
310 |
+
tensor<int32, [2]> var_492 = const()[name = tensor<string, []>("op_492"), val = tensor<int32, [2]>([1, 1])];
|
311 |
+
tensor<string, []> var_494_pad_type_0 = const()[name = tensor<string, []>("op_494_pad_type_0"), val = tensor<string, []>("custom")];
|
312 |
+
tensor<int32, [4]> var_494_pad_0 = const()[name = tensor<string, []>("op_494_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
313 |
+
tensor<fp16, [1, 4096, 1, 64]> var_494_cast_fp16 = conv(dilations = var_492, groups = var_462, pad = var_494_pad_0, pad_type = var_494_pad_type_0, strides = var_490, weight = blocks_2_attn_q_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_494_cast_fp16")];
|
314 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303779008)))];
|
315 |
+
tensor<fp16, [1, 4096, 1, 64]> q_13_cast_fp16 = mul(x = var_494_cast_fp16, y = blocks_2_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_13_cast_fp16")];
|
316 |
+
tensor<int32, [2]> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, [2]>([1, 1])];
|
317 |
+
tensor<int32, [2]> var_500 = const()[name = tensor<string, []>("op_500"), val = tensor<int32, [2]>([1, 1])];
|
318 |
+
tensor<string, []> var_502_pad_type_0 = const()[name = tensor<string, []>("op_502_pad_type_0"), val = tensor<string, []>("custom")];
|
319 |
+
tensor<int32, [4]> var_502_pad_0 = const()[name = tensor<string, []>("op_502_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
320 |
+
tensor<fp16, [1, 4096, 1, 64]> var_502_cast_fp16 = conv(dilations = var_500, groups = var_462, pad = var_502_pad_0, pad_type = var_502_pad_type_0, strides = var_498, weight = blocks_2_attn_k_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_502_cast_fp16")];
|
321 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303787264)))];
|
322 |
+
tensor<fp16, [1, 4096, 1, 64]> k_17_cast_fp16 = mul(x = var_502_cast_fp16, y = blocks_2_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_17_cast_fp16")];
|
323 |
+
tensor<int32, [2]> var_506 = const()[name = tensor<string, []>("op_506"), val = tensor<int32, [2]>([1, 1])];
|
324 |
+
tensor<int32, [2]> var_508 = const()[name = tensor<string, []>("op_508"), val = tensor<int32, [2]>([1, 1])];
|
325 |
+
tensor<string, []> var_510_pad_type_0 = const()[name = tensor<string, []>("op_510_pad_type_0"), val = tensor<string, []>("custom")];
|
326 |
+
tensor<int32, [4]> var_510_pad_0 = const()[name = tensor<string, []>("op_510_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
327 |
+
tensor<fp16, [1, 4096, 1, 64]> var_510_cast_fp16 = conv(dilations = var_508, groups = var_462, pad = var_510_pad_0, pad_type = var_510_pad_type_0, strides = var_506, weight = blocks_2_attn_v_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_510_cast_fp16")];
|
328 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303795520)))];
|
329 |
+
tensor<fp16, [1, 4096, 1, 64]> v_13_cast_fp16 = mul(x = var_510_cast_fp16, y = blocks_2_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_13_cast_fp16")];
|
330 |
+
tensor<int32, [4]> var_512 = const()[name = tensor<string, []>("op_512"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
331 |
+
tensor<fp16, [1, 32, 128, 64]> q_15_cast_fp16 = reshape(shape = var_512, x = q_13_cast_fp16)[name = tensor<string, []>("q_15_cast_fp16")];
|
332 |
+
tensor<int32, [4]> var_514 = const()[name = tensor<string, []>("op_514"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
333 |
+
tensor<fp16, [1, 32, 128, 64]> k_19_cast_fp16 = reshape(shape = var_514, x = k_17_cast_fp16)[name = tensor<string, []>("k_19_cast_fp16")];
|
334 |
+
tensor<int32, [4]> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
335 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_2 = reshape(shape = var_516, x = v_13_cast_fp16)[name = tensor<string, []>("v_15_cast_fp16")];
|
336 |
+
tensor<int32, [4]> var_528_begin_0 = const()[name = tensor<string, []>("op_528_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
337 |
+
tensor<int32, [4]> var_528_end_0 = const()[name = tensor<string, []>("op_528_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
338 |
+
tensor<bool, [4]> var_528_end_mask_0 = const()[name = tensor<string, []>("op_528_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
339 |
+
tensor<fp16, [1, 32, 64, 64]> var_528_cast_fp16 = slice_by_index(begin = var_528_begin_0, end = var_528_end_0, end_mask = var_528_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_528_cast_fp16")];
|
340 |
+
tensor<int32, [4]> var_534_begin_0 = const()[name = tensor<string, []>("op_534_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
341 |
+
tensor<int32, [4]> var_534_end_0 = const()[name = tensor<string, []>("op_534_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
342 |
+
tensor<bool, [4]> var_534_end_mask_0 = const()[name = tensor<string, []>("op_534_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
343 |
+
tensor<fp16, [1, 32, 64, 64]> var_534_cast_fp16 = slice_by_index(begin = var_534_begin_0, end = var_534_end_0, end_mask = var_534_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_534_cast_fp16")];
|
344 |
+
tensor<fp16, []> const_17_promoted_to_fp16 = const()[name = tensor<string, []>("const_17_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
345 |
+
tensor<fp16, [1, 32, 64, 64]> var_536_cast_fp16 = mul(x = var_534_cast_fp16, y = const_17_promoted_to_fp16)[name = tensor<string, []>("op_536_cast_fp16")];
|
346 |
+
tensor<bool, []> rotated_9_interleave_0 = const()[name = tensor<string, []>("rotated_9_interleave_0"), val = tensor<bool, []>(false)];
|
347 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_9_cast_fp16 = concat(axis = var_453, interleave = rotated_9_interleave_0, values = (var_536_cast_fp16, var_528_cast_fp16))[name = tensor<string, []>("rotated_9_cast_fp16")];
|
348 |
+
tensor<fp16, [1, 32, 128, 64]> var_539_cast_fp16 = mul(x = q_15_cast_fp16, y = cos)[name = tensor<string, []>("op_539_cast_fp16")];
|
349 |
+
tensor<fp16, [1, 32, 128, 64]> var_540_cast_fp16 = mul(x = rotated_9_cast_fp16, y = sin)[name = tensor<string, []>("op_540_cast_fp16")];
|
350 |
+
tensor<fp16, [1, 32, 128, 64]> roped_9_cast_fp16 = add(x = var_539_cast_fp16, y = var_540_cast_fp16)[name = tensor<string, []>("roped_9_cast_fp16")];
|
351 |
+
tensor<int32, [4]> var_553_begin_0 = const()[name = tensor<string, []>("op_553_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
352 |
+
tensor<int32, [4]> var_553_end_0 = const()[name = tensor<string, []>("op_553_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
353 |
+
tensor<bool, [4]> var_553_end_mask_0 = const()[name = tensor<string, []>("op_553_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
354 |
+
tensor<fp16, [1, 32, 64, 64]> var_553_cast_fp16 = slice_by_index(begin = var_553_begin_0, end = var_553_end_0, end_mask = var_553_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_553_cast_fp16")];
|
355 |
+
tensor<int32, [4]> var_559_begin_0 = const()[name = tensor<string, []>("op_559_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
356 |
+
tensor<int32, [4]> var_559_end_0 = const()[name = tensor<string, []>("op_559_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
357 |
+
tensor<bool, [4]> var_559_end_mask_0 = const()[name = tensor<string, []>("op_559_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
358 |
+
tensor<fp16, [1, 32, 64, 64]> var_559_cast_fp16 = slice_by_index(begin = var_559_begin_0, end = var_559_end_0, end_mask = var_559_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_559_cast_fp16")];
|
359 |
+
tensor<fp16, []> const_19_promoted_to_fp16 = const()[name = tensor<string, []>("const_19_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
360 |
+
tensor<fp16, [1, 32, 64, 64]> var_561_cast_fp16 = mul(x = var_559_cast_fp16, y = const_19_promoted_to_fp16)[name = tensor<string, []>("op_561_cast_fp16")];
|
361 |
+
tensor<bool, []> rotated_interleave_0 = const()[name = tensor<string, []>("rotated_interleave_0"), val = tensor<bool, []>(false)];
|
362 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_cast_fp16 = concat(axis = var_453, interleave = rotated_interleave_0, values = (var_561_cast_fp16, var_553_cast_fp16))[name = tensor<string, []>("rotated_cast_fp16")];
|
363 |
+
tensor<fp16, [1, 32, 128, 64]> var_564_cast_fp16 = mul(x = k_19_cast_fp16, y = cos)[name = tensor<string, []>("op_564_cast_fp16")];
|
364 |
+
tensor<fp16, [1, 32, 128, 64]> var_565_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = tensor<string, []>("op_565_cast_fp16")];
|
365 |
+
tensor<fp16, [1, 32, 128, 64]> roped_cast_fp16 = add(x = var_564_cast_fp16, y = var_565_cast_fp16)[name = tensor<string, []>("roped_cast_fp16")];
|
366 |
+
tensor<bool, []> q_interleave_0 = const()[name = tensor<string, []>("q_interleave_0"), val = tensor<bool, []>(false)];
|
367 |
+
tensor<fp16, [1, 32, 128, 64]> q_cast_fp16 = concat(axis = var_453, interleave = q_interleave_0, values = roped_9_cast_fp16)[name = tensor<string, []>("q_cast_fp16")];
|
368 |
+
tensor<bool, []> k_21_interleave_0 = const()[name = tensor<string, []>("k_21_interleave_0"), val = tensor<bool, []>(false)];
|
369 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_2 = concat(axis = var_453, interleave = k_21_interleave_0, values = roped_cast_fp16)[name = tensor<string, []>("k_21_cast_fp16")];
|
370 |
+
tensor<bool, []> k_interleave_0 = const()[name = tensor<string, []>("k_interleave_0"), val = tensor<bool, []>(false)];
|
371 |
+
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_455, interleave = k_interleave_0, values = (k_cache_2, new_k_cache_2))[name = tensor<string, []>("k_cast_fp16")];
|
372 |
+
tensor<bool, []> v_interleave_0 = const()[name = tensor<string, []>("v_interleave_0"), val = tensor<bool, []>(false)];
|
373 |
+
tensor<fp16, [1, 32, 128, 512]> v_cast_fp16 = concat(axis = var_455, interleave = v_interleave_0, values = (v_cache_2, new_v_cache_2))[name = tensor<string, []>("v_cast_fp16")];
|
374 |
+
tensor<fp16, []> var_587_to_fp16 = const()[name = tensor<string, []>("op_587_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
375 |
+
tensor<fp16, [1, 32, 128, 64]> var_588_cast_fp16 = mul(x = q_cast_fp16, y = var_587_to_fp16)[name = tensor<string, []>("op_588_cast_fp16")];
|
376 |
+
tensor<bool, []> attn_weights_9_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_x_0"), val = tensor<bool, []>(true)];
|
377 |
+
tensor<bool, []> attn_weights_9_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_y_0"), val = tensor<bool, []>(false)];
|
378 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_9_cast_fp16 = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_588_cast_fp16, y = k_cast_fp16)[name = tensor<string, []>("attn_weights_9_cast_fp16")];
|
379 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_cast_fp16 = add(x = attn_weights_9_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_cast_fp16")];
|
380 |
+
tensor<fp16, [1, 32, 64, 512]> var_596_cast_fp16 = softmax(axis = var_448, x = attn_weights_cast_fp16)[name = tensor<string, []>("op_596_cast_fp16")];
|
381 |
+
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
|
382 |
+
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
|
383 |
+
tensor<fp16, [1, 32, 128, 64]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = v_cast_fp16, y = var_596_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
|
384 |
+
tensor<int32, [4]> var_600 = const()[name = tensor<string, []>("op_600"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
385 |
+
tensor<fp16, [1, 4096, 1, 64]> input_17_cast_fp16 = reshape(shape = var_600, x = attn_5_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
|
386 |
+
tensor<int32, [2]> var_604 = const()[name = tensor<string, []>("op_604"), val = tensor<int32, [2]>([1, 1])];
|
387 |
+
tensor<int32, [2]> var_606 = const()[name = tensor<string, []>("op_606"), val = tensor<int32, [2]>([1, 1])];
|
388 |
+
tensor<string, []> var_608_pad_type_0 = const()[name = tensor<string, []>("op_608_pad_type_0"), val = tensor<string, []>("custom")];
|
389 |
+
tensor<int32, [4]> var_608_pad_0 = const()[name = tensor<string, []>("op_608_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
390 |
+
tensor<fp16, [1, 4096, 1, 64]> var_608_cast_fp16 = conv(dilations = var_606, groups = var_462, pad = var_608_pad_0, pad_type = var_608_pad_type_0, strides = var_604, weight = blocks_2_attn_proj_weight_palettized_cast_fp16, x = input_17_cast_fp16)[name = tensor<string, []>("op_608_cast_fp16")];
|
391 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303803776)))];
|
392 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_cast_fp16 = mul(x = var_608_cast_fp16, y = blocks_2_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_cast_fp16")];
|
393 |
+
tensor<fp16, [1, 4096, 1, 64]> x_39_cast_fp16 = add(x = attention_output_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("x_39_cast_fp16")];
|
394 |
+
tensor<fp16, [1, 4096, 1, 64]> var_617_cast_fp16 = mul(x = x_39_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")];
|
395 |
+
tensor<int32, [1]> var_618 = const()[name = tensor<string, []>("op_618"), val = tensor<int32, [1]>([1])];
|
396 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_618, keep_dims = var_463, x = var_617_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
397 |
+
tensor<fp16, []> var_620_to_fp16 = const()[name = tensor<string, []>("op_620_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
398 |
+
tensor<fp16, [1, 1, 1, 64]> var_621_cast_fp16 = add(x = norm_x_cast_fp16, y = var_620_to_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
399 |
+
tensor<fp16, []> var_622_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_622_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
400 |
+
tensor<fp16, [1, 1, 1, 64]> var_622_cast_fp16 = rsqrt(epsilon = var_622_epsilon_0_to_fp16, x = var_621_cast_fp16)[name = tensor<string, []>("op_622_cast_fp16")];
|
401 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_21_cast_fp16 = mul(x = x_39_cast_fp16, y = var_622_cast_fp16)[name = tensor<string, []>("x_normed_21_cast_fp16")];
|
402 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303812032)))];
|
403 |
+
tensor<fp16, [1, 4096, 1, 64]> input_19_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_2_norm_2_weight_to_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
404 |
+
tensor<int32, [2]> var_634 = const()[name = tensor<string, []>("op_634"), val = tensor<int32, [2]>([1, 1])];
|
405 |
+
tensor<int32, [2]> var_636 = const()[name = tensor<string, []>("op_636"), val = tensor<int32, [2]>([1, 1])];
|
406 |
+
tensor<string, []> var_638_pad_type_0 = const()[name = tensor<string, []>("op_638_pad_type_0"), val = tensor<string, []>("custom")];
|
407 |
+
tensor<int32, [4]> var_638_pad_0 = const()[name = tensor<string, []>("op_638_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
408 |
+
tensor<fp16, [1, 11008, 1, 64]> var_638_cast_fp16 = conv(dilations = var_636, groups = var_462, pad = var_638_pad_0, pad_type = var_638_pad_type_0, strides = var_634, weight = blocks_2_mlp_fc_1_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303820288)))];
|
410 |
+
tensor<fp16, [1, 11008, 1, 64]> input_21_cast_fp16 = mul(x = var_638_cast_fp16, y = blocks_2_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
|
411 |
+
tensor<int32, [2]> var_642 = const()[name = tensor<string, []>("op_642"), val = tensor<int32, [2]>([1, 1])];
|
412 |
+
tensor<int32, [2]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [2]>([1, 1])];
|
413 |
+
tensor<string, []> var_646_pad_type_0 = const()[name = tensor<string, []>("op_646_pad_type_0"), val = tensor<string, []>("custom")];
|
414 |
+
tensor<int32, [4]> var_646_pad_0 = const()[name = tensor<string, []>("op_646_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
415 |
+
tensor<fp16, [1, 11008, 1, 64]> var_646_cast_fp16 = conv(dilations = var_644, groups = var_462, pad = var_646_pad_0, pad_type = var_646_pad_type_0, strides = var_642, weight = blocks_2_mlp_fc_2_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_646_cast_fp16")];
|
416 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303842368)))];
|
417 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_cast_fp16 = mul(x = var_646_cast_fp16, y = blocks_2_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 11008, 1, 64]> var_648_cast_fp16 = silu(x = input_21_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
419 |
+
tensor<fp16, [1, 11008, 1, 64]> input_cast_fp16 = mul(x = var_648_cast_fp16, y = x_fc_2_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
420 |
+
tensor<int32, [2]> var_652 = const()[name = tensor<string, []>("op_652"), val = tensor<int32, [2]>([1, 1])];
|
421 |
+
tensor<int32, [2]> var_654 = const()[name = tensor<string, []>("op_654"), val = tensor<int32, [2]>([1, 1])];
|
422 |
+
tensor<string, []> var_656_pad_type_0 = const()[name = tensor<string, []>("op_656_pad_type_0"), val = tensor<string, []>("custom")];
|
423 |
+
tensor<int32, [4]> var_656_pad_0 = const()[name = tensor<string, []>("op_656_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
424 |
+
tensor<fp16, [1, 4096, 1, 64]> var_656_cast_fp16 = conv(dilations = var_654, groups = var_462, pad = var_656_pad_0, pad_type = var_656_pad_type_0, strides = var_652, weight = blocks_2_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")];
|
425 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303864448)))];
|
426 |
+
tensor<fp16, [1, 4096, 1, 64]> var_657_cast_fp16 = mul(x = var_656_cast_fp16, y = blocks_2_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_657_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 4096, 1, 64]> new_x = add(x = var_657_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
428 |
+
} -> (new_x, new_k_cache_0, new_k_cache_1, new_k_cache_2, new_v_cache_0, new_v_cache_1, new_v_cache_2);
|
429 |
+
}
|
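A minimal usage sketch (not part of the checked-in files, and only an assumption about how these chunks are meant to be driven): each transformer chunk can be loaded as a compiled Core ML model with coremltools ≥ 7 on macOS 13+, and called with Float16 arrays whose shapes follow the metadata.json files in this commit. The chunk path, the zero-filled dummy inputs, and the choice of compute units below are placeholders, not values taken from this repository.

import numpy as np
import coremltools as ct

# Load one compiled transformer chunk (the path is a placeholder for any of the
# middle chunks, which all share the same input/output schema).
chunk = ct.models.CompiledMLModel(
    "Llama-2-7b-hf_chunk4.mlmodelc",
    compute_units=ct.ComputeUnit.CPU_AND_NE,
)

# Dummy Float16 inputs; shapes come from the chunk's metadata.json (64-token
# window, 512-position attention mask). The optional k_cache_*/v_cache_* inputs
# can be omitted on the first call.
inputs = {
    "x":    np.zeros((1, 4096, 1, 64), dtype=np.float16),
    "cos":  np.zeros((128, 64),        dtype=np.float16),
    "sin":  np.zeros((128, 64),        dtype=np.float16),
    "mask": np.zeros((1, 1, 64, 512),  dtype=np.float16),
}

outputs = chunk.predict(inputs)
print(outputs["new_x"].shape)          # (1, 4096, 1, 64)
print(outputs["new_k_cache_0"].shape)  # (1, 32, 128, 64)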
Llama-2-7b-hf_chunk3.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad1bc13ecfabbb4f02f8306bf18913019826fb28b002e14f11bddeca7a9edefa
+size 303872704
Llama-2-7b-hf_chunk4.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3412284b024b899a736cd77112d4b1a4a5faa19d954259e925ef429f58bd886b
+size 243
Llama-2-7b-hf_chunk4.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b79e263bb20b8a02d650dad2c3eee71ff787829f337aedacb6cd4e1b61c1ce23
+size 791
Llama-2-7b-hf_chunk4.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 4096, 1, 64]",
        "name" : "new_x",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_k_cache_0",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_k_cache_1",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_k_cache_2",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_v_cache_0",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_v_cache_1",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 64]",
        "name" : "new_v_cache_2",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Concat" : 18,
      "Ios16.rsqrt" : 6,
      "Ios16.mul" : 63,
      "SliceByIndex" : 12,
      "Ios16.constexprLutToDense" : 21,
      "Ios16.conv" : 21,
      "Ios16.add" : 21,
      "Ios16.reduceMean" : 6,
      "Ios16.matmul" : 6,
      "Ios16.softmax" : 3,
      "Ios16.reshape" : 12,
      "Ios16.silu" : 3
    },
    "computePrecision" : "Mixed (Float16, Int32)",
    "isUpdatable" : "0",
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.source" : "torch==2.1.0",
      "com.github.apple.coremltools.version" : "7.2"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
        "shortDescription" : "",
        "shape" : "[1, 4096, 1, 64]",
        "name" : "x",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 128 × 64)",
        "shortDescription" : "",
        "shape" : "[128, 64]",
        "name" : "cos",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 128 × 64)",
        "shortDescription" : "",
        "shape" : "[128, 64]",
        "name" : "sin",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
        "shortDescription" : "",
        "shape" : "[1, 1, 64, 512]",
        "name" : "mask",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "k_cache_0",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "v_cache_0",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "k_cache_1",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "v_cache_1",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "k_cache_2",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "1",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
        "shortDescription" : "",
        "shape" : "[1, 32, 128, 448]",
        "name" : "v_cache_2",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk4",
    "method" : "predict"
  }
]
Llama-2-7b-hf_chunk4.mlmodelc/model.mil
ADDED
@@ -0,0 +1,429 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [128, 64]> cos, tensor<fp16, [1, 32, 128, 448]> k_cache_0, tensor<fp16, [1, 32, 128, 448]> k_cache_1, tensor<fp16, [1, 32, 128, 448]> k_cache_2, tensor<fp16, [1, 1, 64, 512]> mask, tensor<fp16, [128, 64]> sin, tensor<fp16, [1, 32, 128, 448]> v_cache_0, tensor<fp16, [1, 32, 128, 448]> v_cache_1, tensor<fp16, [1, 32, 128, 448]> v_cache_2, tensor<fp16, [1, 4096, 1, 64]> x) [CoreML_InputDefaultValues = dict<tensor<string, []>, tensor<fp32, []>>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"k_cache_2", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}, {"v_cache_2", 0}})] {
|
5 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388736))), name = tensor<string, []>("blocks_0_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
6 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388864))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777536))), name = tensor<string, []>("blocks_0_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
7 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777664))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166336))), name = tensor<string, []>("blocks_0_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
8 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166464))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555136))), name = tensor<string, []>("blocks_0_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
9 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555264))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099712))), name = tensor<string, []>("blocks_0_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
10 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644288))), name = tensor<string, []>("blocks_0_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
11 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644416))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188864))), name = tensor<string, []>("blocks_0_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
12 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188992))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577664))), name = tensor<string, []>("blocks_1_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
13 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577792))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966464))), name = tensor<string, []>("blocks_1_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
14 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966592))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355264))), name = tensor<string, []>("blocks_1_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
15 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355392))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744064))), name = tensor<string, []>("blocks_1_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
16 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744192))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288640))), name = tensor<string, []>("blocks_1_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
17 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288768))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833216))), name = tensor<string, []>("blocks_1_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
18 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833344))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377792))), name = tensor<string, []>("blocks_1_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
19 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377920))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766592))), name = tensor<string, []>("blocks_2_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
20 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766720))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155392))), name = tensor<string, []>("blocks_2_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
21 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155520))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544192))), name = tensor<string, []>("blocks_2_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
22 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544320))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235932992))), name = tensor<string, []>("blocks_2_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
23 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235933120))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477568))), name = tensor<string, []>("blocks_2_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
24 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477696))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022144))), name = tensor<string, []>("blocks_2_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
25 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_2_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022272))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566720))), name = tensor<string, []>("blocks_2_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
26 |
+
tensor<int32, []> var_18 = const()[name = tensor<string, []>("op_18"), val = tensor<int32, []>(3)];
|
27 |
+
tensor<int32, []> var_23 = const()[name = tensor<string, []>("op_23"), val = tensor<int32, []>(-2)];
|
28 |
+
tensor<int32, []> var_25 = const()[name = tensor<string, []>("op_25"), val = tensor<int32, []>(-1)];
|
29 |
+
tensor<int32, []> var_32 = const()[name = tensor<string, []>("op_32"), val = tensor<int32, []>(1)];
|
30 |
+
tensor<bool, []> var_33 = const()[name = tensor<string, []>("op_33"), val = tensor<bool, []>(true)];
|
31 |
+
tensor<fp16, [1, 4096, 1, 64]> var_41_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_41_cast_fp16")];
|
32 |
+
tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([1])];
|
33 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_1_cast_fp16 = reduce_mean(axes = var_42, keep_dims = var_33, x = var_41_cast_fp16)[name = tensor<string, []>("norm_x_1_cast_fp16")];
|
34 |
+
tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
35 |
+
tensor<fp16, [1, 1, 1, 64]> var_45_cast_fp16 = add(x = norm_x_1_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
|
36 |
+
tensor<fp16, []> var_46_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_46_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
37 |
+
tensor<fp16, [1, 1, 1, 64]> var_46_cast_fp16 = rsqrt(epsilon = var_46_epsilon_0_to_fp16, x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
|
38 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_46_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
|
39 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566848)))];
|
40 |
+
tensor<fp16, [1, 4096, 1, 64]> x_5_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
|
41 |
+
tensor<int32, [2]> var_58 = const()[name = tensor<string, []>("op_58"), val = tensor<int32, [2]>([1, 1])];
|
42 |
+
tensor<int32, [2]> var_60 = const()[name = tensor<string, []>("op_60"), val = tensor<int32, [2]>([1, 1])];
|
43 |
+
tensor<string, []> var_62_pad_type_0 = const()[name = tensor<string, []>("op_62_pad_type_0"), val = tensor<string, []>("custom")];
|
44 |
+
tensor<int32, [4]> var_62_pad_0 = const()[name = tensor<string, []>("op_62_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
45 |
+
tensor<fp16, [1, 4096, 1, 64]> var_62_cast_fp16 = conv(dilations = var_60, groups = var_32, pad = var_62_pad_0, pad_type = var_62_pad_type_0, strides = var_58, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
|
46 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303575104)))];
|
47 |
+
tensor<fp16, [1, 4096, 1, 64]> q_1_cast_fp16 = mul(x = var_62_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_1_cast_fp16")];
|
48 |
+
tensor<int32, [2]> var_66 = const()[name = tensor<string, []>("op_66"), val = tensor<int32, [2]>([1, 1])];
|
49 |
+
tensor<int32, [2]> var_68 = const()[name = tensor<string, []>("op_68"), val = tensor<int32, [2]>([1, 1])];
|
50 |
+
tensor<string, []> var_70_pad_type_0 = const()[name = tensor<string, []>("op_70_pad_type_0"), val = tensor<string, []>("custom")];
|
51 |
+
tensor<int32, [4]> var_70_pad_0 = const()[name = tensor<string, []>("op_70_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
52 |
+
tensor<fp16, [1, 4096, 1, 64]> var_70_cast_fp16 = conv(dilations = var_68, groups = var_32, pad = var_70_pad_0, pad_type = var_70_pad_type_0, strides = var_66, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_70_cast_fp16")];
|
53 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303583360)))];
|
54 |
+
tensor<fp16, [1, 4096, 1, 64]> k_1_cast_fp16 = mul(x = var_70_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_1_cast_fp16")];
|
55 |
+
tensor<int32, [2]> var_74 = const()[name = tensor<string, []>("op_74"), val = tensor<int32, [2]>([1, 1])];
|
56 |
+
tensor<int32, [2]> var_76 = const()[name = tensor<string, []>("op_76"), val = tensor<int32, [2]>([1, 1])];
|
57 |
+
tensor<string, []> var_78_pad_type_0 = const()[name = tensor<string, []>("op_78_pad_type_0"), val = tensor<string, []>("custom")];
|
58 |
+
tensor<int32, [4]> var_78_pad_0 = const()[name = tensor<string, []>("op_78_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
59 |
+
tensor<fp16, [1, 4096, 1, 64]> var_78_cast_fp16 = conv(dilations = var_76, groups = var_32, pad = var_78_pad_0, pad_type = var_78_pad_type_0, strides = var_74, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_78_cast_fp16")];
|
60 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303591616)))];
|
61 |
+
tensor<fp16, [1, 4096, 1, 64]> v_1_cast_fp16 = mul(x = var_78_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_1_cast_fp16")];
|
62 |
+
tensor<int32, [4]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
63 |
+
tensor<fp16, [1, 32, 128, 64]> q_3_cast_fp16 = reshape(shape = var_80, x = q_1_cast_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
|
64 |
+
tensor<int32, [4]> var_82 = const()[name = tensor<string, []>("op_82"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
65 |
+
tensor<fp16, [1, 32, 128, 64]> k_3_cast_fp16 = reshape(shape = var_82, x = k_1_cast_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
|
66 |
+
tensor<int32, [4]> var_84 = const()[name = tensor<string, []>("op_84"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
67 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_0 = reshape(shape = var_84, x = v_1_cast_fp16)[name = tensor<string, []>("v_3_cast_fp16")];
|
68 |
+
tensor<int32, [4]> var_96_begin_0 = const()[name = tensor<string, []>("op_96_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
69 |
+
tensor<int32, [4]> var_96_end_0 = const()[name = tensor<string, []>("op_96_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
70 |
+
tensor<bool, [4]> var_96_end_mask_0 = const()[name = tensor<string, []>("op_96_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
71 |
+
tensor<fp16, [1, 32, 64, 64]> var_96_cast_fp16 = slice_by_index(begin = var_96_begin_0, end = var_96_end_0, end_mask = var_96_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_96_cast_fp16")];
|
72 |
+
tensor<int32, [4]> var_102_begin_0 = const()[name = tensor<string, []>("op_102_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
73 |
+
tensor<int32, [4]> var_102_end_0 = const()[name = tensor<string, []>("op_102_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
74 |
+
tensor<bool, [4]> var_102_end_mask_0 = const()[name = tensor<string, []>("op_102_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
75 |
+
tensor<fp16, [1, 32, 64, 64]> var_102_cast_fp16 = slice_by_index(begin = var_102_begin_0, end = var_102_end_0, end_mask = var_102_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_102_cast_fp16")];
|
76 |
+
tensor<fp16, []> const_3_promoted_to_fp16 = const()[name = tensor<string, []>("const_3_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
77 |
+
tensor<fp16, [1, 32, 64, 64]> var_104_cast_fp16 = mul(x = var_102_cast_fp16, y = const_3_promoted_to_fp16)[name = tensor<string, []>("op_104_cast_fp16")];
|
78 |
+
tensor<bool, []> rotated_1_interleave_0 = const()[name = tensor<string, []>("rotated_1_interleave_0"), val = tensor<bool, []>(false)];
|
79 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_1_cast_fp16 = concat(axis = var_23, interleave = rotated_1_interleave_0, values = (var_104_cast_fp16, var_96_cast_fp16))[name = tensor<string, []>("rotated_1_cast_fp16")];
|
80 |
+
tensor<fp16, [1, 32, 128, 64]> var_107_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = tensor<string, []>("op_107_cast_fp16")];
|
81 |
+
tensor<fp16, [1, 32, 128, 64]> var_108_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = tensor<string, []>("op_108_cast_fp16")];
|
82 |
+
tensor<fp16, [1, 32, 128, 64]> roped_1_cast_fp16 = add(x = var_107_cast_fp16, y = var_108_cast_fp16)[name = tensor<string, []>("roped_1_cast_fp16")];
|
83 |
+
tensor<int32, [4]> var_121_begin_0 = const()[name = tensor<string, []>("op_121_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
84 |
+
tensor<int32, [4]> var_121_end_0 = const()[name = tensor<string, []>("op_121_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
85 |
+
tensor<bool, [4]> var_121_end_mask_0 = const()[name = tensor<string, []>("op_121_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
86 |
+
tensor<fp16, [1, 32, 64, 64]> var_121_cast_fp16 = slice_by_index(begin = var_121_begin_0, end = var_121_end_0, end_mask = var_121_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")];
|
87 |
+
tensor<int32, [4]> var_127_begin_0 = const()[name = tensor<string, []>("op_127_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
88 |
+
tensor<int32, [4]> var_127_end_0 = const()[name = tensor<string, []>("op_127_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
89 |
+
tensor<bool, [4]> var_127_end_mask_0 = const()[name = tensor<string, []>("op_127_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
90 |
+
tensor<fp16, [1, 32, 64, 64]> var_127_cast_fp16 = slice_by_index(begin = var_127_begin_0, end = var_127_end_0, end_mask = var_127_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_127_cast_fp16")];
|
91 |
+
tensor<fp16, []> const_5_promoted_to_fp16 = const()[name = tensor<string, []>("const_5_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
92 |
+
tensor<fp16, [1, 32, 64, 64]> var_129_cast_fp16 = mul(x = var_127_cast_fp16, y = const_5_promoted_to_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
|
93 |
+
tensor<bool, []> rotated_3_interleave_0 = const()[name = tensor<string, []>("rotated_3_interleave_0"), val = tensor<bool, []>(false)];
|
94 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_3_cast_fp16 = concat(axis = var_23, interleave = rotated_3_interleave_0, values = (var_129_cast_fp16, var_121_cast_fp16))[name = tensor<string, []>("rotated_3_cast_fp16")];
|
95 |
+
tensor<fp16, [1, 32, 128, 64]> var_132_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = tensor<string, []>("op_132_cast_fp16")];
|
96 |
+
tensor<fp16, [1, 32, 128, 64]> var_133_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = tensor<string, []>("op_133_cast_fp16")];
|
97 |
+
tensor<fp16, [1, 32, 128, 64]> roped_3_cast_fp16 = add(x = var_132_cast_fp16, y = var_133_cast_fp16)[name = tensor<string, []>("roped_3_cast_fp16")];
|
98 |
+
tensor<bool, []> q_5_interleave_0 = const()[name = tensor<string, []>("q_5_interleave_0"), val = tensor<bool, []>(false)];
|
99 |
+
tensor<fp16, [1, 32, 128, 64]> q_5_cast_fp16 = concat(axis = var_23, interleave = q_5_interleave_0, values = roped_1_cast_fp16)[name = tensor<string, []>("q_5_cast_fp16")];
|
100 |
+
tensor<bool, []> k_5_interleave_0 = const()[name = tensor<string, []>("k_5_interleave_0"), val = tensor<bool, []>(false)];
|
101 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_0 = concat(axis = var_23, interleave = k_5_interleave_0, values = roped_3_cast_fp16)[name = tensor<string, []>("k_5_cast_fp16")];
|
102 |
+
tensor<bool, []> k_7_interleave_0 = const()[name = tensor<string, []>("k_7_interleave_0"), val = tensor<bool, []>(false)];
|
103 |
+
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_25, interleave = k_7_interleave_0, values = (k_cache_0, new_k_cache_0))[name = tensor<string, []>("k_7_cast_fp16")];
|
104 |
+
tensor<bool, []> v_5_interleave_0 = const()[name = tensor<string, []>("v_5_interleave_0"), val = tensor<bool, []>(false)];
|
105 |
+
tensor<fp16, [1, 32, 128, 512]> v_5_cast_fp16 = concat(axis = var_25, interleave = v_5_interleave_0, values = (v_cache_0, new_v_cache_0))[name = tensor<string, []>("v_5_cast_fp16")];
|
106 |
+
tensor<fp16, []> var_155_to_fp16 = const()[name = tensor<string, []>("op_155_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
107 |
+
tensor<fp16, [1, 32, 128, 64]> var_156_cast_fp16 = mul(x = q_5_cast_fp16, y = var_155_to_fp16)[name = tensor<string, []>("op_156_cast_fp16")];
|
108 |
+
tensor<bool, []> attn_weights_1_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_0"), val = tensor<bool, []>(true)];
|
109 |
+
tensor<bool, []> attn_weights_1_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_0"), val = tensor<bool, []>(false)];
|
110 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_156_cast_fp16, y = k_7_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
|
111 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
|
112 |
+
tensor<fp16, [1, 32, 64, 512]> var_164_cast_fp16 = softmax(axis = var_18, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
|
113 |
+
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
|
114 |
+
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
|
115 |
+
tensor<fp16, [1, 32, 128, 64]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = v_5_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
|
116 |
+
tensor<int32, [4]> var_168 = const()[name = tensor<string, []>("op_168"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
117 |
+
tensor<fp16, [1, 4096, 1, 64]> input_1_cast_fp16 = reshape(shape = var_168, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
|
118 |
+
tensor<int32, [2]> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, [2]>([1, 1])];
|
119 |
+
tensor<int32, [2]> var_174 = const()[name = tensor<string, []>("op_174"), val = tensor<int32, [2]>([1, 1])];
|
120 |
+
tensor<string, []> var_176_pad_type_0 = const()[name = tensor<string, []>("op_176_pad_type_0"), val = tensor<string, []>("custom")];
|
121 |
+
tensor<int32, [4]> var_176_pad_0 = const()[name = tensor<string, []>("op_176_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
122 |
+
tensor<fp16, [1, 4096, 1, 64]> var_176_cast_fp16 = conv(dilations = var_174, groups = var_32, pad = var_176_pad_0, pad_type = var_176_pad_type_0, strides = var_172, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
|
123 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303599872)))];
|
124 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_1_cast_fp16 = mul(x = var_176_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_1_cast_fp16")];
|
125 |
+
tensor<fp16, [1, 4096, 1, 64]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = tensor<string, []>("x_11_cast_fp16")];
|
126 |
+
tensor<fp16, [1, 4096, 1, 64]> var_185_cast_fp16 = mul(x = x_11_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("op_185_cast_fp16")];
|
127 |
+
tensor<int32, [1]> var_186 = const()[name = tensor<string, []>("op_186"), val = tensor<int32, [1]>([1])];
|
128 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_3_cast_fp16 = reduce_mean(axes = var_186, keep_dims = var_33, x = var_185_cast_fp16)[name = tensor<string, []>("norm_x_3_cast_fp16")];
|
129 |
+
tensor<fp16, []> var_188_to_fp16 = const()[name = tensor<string, []>("op_188_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
130 |
+
tensor<fp16, [1, 1, 1, 64]> var_189_cast_fp16 = add(x = norm_x_3_cast_fp16, y = var_188_to_fp16)[name = tensor<string, []>("op_189_cast_fp16")];
|
131 |
+
tensor<fp16, []> var_190_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_190_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
132 |
+
tensor<fp16, [1, 1, 1, 64]> var_190_cast_fp16 = rsqrt(epsilon = var_190_epsilon_0_to_fp16, x = var_189_cast_fp16)[name = tensor<string, []>("op_190_cast_fp16")];
|
133 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_5_cast_fp16 = mul(x = x_11_cast_fp16, y = var_190_cast_fp16)[name = tensor<string, []>("x_normed_5_cast_fp16")];
|
134 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303608128)))];
|
135 |
+
tensor<fp16, [1, 4096, 1, 64]> input_3_cast_fp16 = mul(x = x_normed_5_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
136 |
+
tensor<int32, [2]> var_202 = const()[name = tensor<string, []>("op_202"), val = tensor<int32, [2]>([1, 1])];
|
137 |
+
tensor<int32, [2]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [2]>([1, 1])];
|
138 |
+
tensor<string, []> var_206_pad_type_0 = const()[name = tensor<string, []>("op_206_pad_type_0"), val = tensor<string, []>("custom")];
|
139 |
+
tensor<int32, [4]> var_206_pad_0 = const()[name = tensor<string, []>("op_206_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
140 |
+
tensor<fp16, [1, 11008, 1, 64]> var_206_cast_fp16 = conv(dilations = var_204, groups = var_32, pad = var_206_pad_0, pad_type = var_206_pad_type_0, strides = var_202, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_206_cast_fp16")];
|
141 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303616384)))];
|
142 |
+
tensor<fp16, [1, 11008, 1, 64]> input_5_cast_fp16 = mul(x = var_206_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
143 |
+
tensor<int32, [2]> var_210 = const()[name = tensor<string, []>("op_210"), val = tensor<int32, [2]>([1, 1])];
|
144 |
+
tensor<int32, [2]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [2]>([1, 1])];
|
145 |
+
tensor<string, []> var_214_pad_type_0 = const()[name = tensor<string, []>("op_214_pad_type_0"), val = tensor<string, []>("custom")];
|
146 |
+
tensor<int32, [4]> var_214_pad_0 = const()[name = tensor<string, []>("op_214_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
147 |
+
tensor<fp16, [1, 11008, 1, 64]> var_214_cast_fp16 = conv(dilations = var_212, groups = var_32, pad = var_214_pad_0, pad_type = var_214_pad_type_0, strides = var_210, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")];
|
148 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303638464)))];
|
149 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_1_cast_fp16 = mul(x = var_214_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_1_cast_fp16")];
|
150 |
+
tensor<fp16, [1, 11008, 1, 64]> var_216_cast_fp16 = silu(x = input_5_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
|
151 |
+
tensor<fp16, [1, 11008, 1, 64]> input_7_cast_fp16 = mul(x = var_216_cast_fp16, y = x_fc_2_1_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
152 |
+
tensor<int32, [2]> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<int32, [2]>([1, 1])];
|
153 |
+
tensor<int32, [2]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [2]>([1, 1])];
|
154 |
+
tensor<string, []> var_224_pad_type_0 = const()[name = tensor<string, []>("op_224_pad_type_0"), val = tensor<string, []>("custom")];
|
155 |
+
tensor<int32, [4]> var_224_pad_0 = const()[name = tensor<string, []>("op_224_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
156 |
+
tensor<fp16, [1, 4096, 1, 64]> var_224_cast_fp16 = conv(dilations = var_222, groups = var_32, pad = var_224_pad_0, pad_type = var_224_pad_type_0, strides = var_220, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("op_224_cast_fp16")];
|
157 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303660544)))];
|
158 |
+
tensor<fp16, [1, 4096, 1, 64]> var_225_cast_fp16 = mul(x = var_224_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_225_cast_fp16")];
|
159 |
+
tensor<fp16, [1, 4096, 1, 64]> x_15_cast_fp16 = add(x = var_225_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("x_15_cast_fp16")];
|
160 |
+
tensor<int32, []> var_232 = const()[name = tensor<string, []>("op_232"), val = tensor<int32, []>(3)];
|
161 |
+
tensor<int32, []> var_237 = const()[name = tensor<string, []>("op_237"), val = tensor<int32, []>(-2)];
|
162 |
+
tensor<int32, []> var_239 = const()[name = tensor<string, []>("op_239"), val = tensor<int32, []>(-1)];
|
163 |
+
tensor<int32, []> var_246 = const()[name = tensor<string, []>("op_246"), val = tensor<int32, []>(1)];
|
164 |
+
tensor<bool, []> var_247 = const()[name = tensor<string, []>("op_247"), val = tensor<bool, []>(true)];
|
165 |
+
tensor<fp16, [1, 4096, 1, 64]> var_254_cast_fp16 = mul(x = x_15_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("op_254_cast_fp16")];
|
166 |
+
tensor<int32, [1]> var_255 = const()[name = tensor<string, []>("op_255"), val = tensor<int32, [1]>([1])];
|
167 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_5_cast_fp16 = reduce_mean(axes = var_255, keep_dims = var_247, x = var_254_cast_fp16)[name = tensor<string, []>("norm_x_5_cast_fp16")];
|
168 |
+
tensor<fp16, []> var_257_to_fp16 = const()[name = tensor<string, []>("op_257_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
169 |
+
tensor<fp16, [1, 1, 1, 64]> var_258_cast_fp16 = add(x = norm_x_5_cast_fp16, y = var_257_to_fp16)[name = tensor<string, []>("op_258_cast_fp16")];
|
170 |
+
tensor<fp16, []> var_259_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_259_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
171 |
+
tensor<fp16, [1, 1, 1, 64]> var_259_cast_fp16 = rsqrt(epsilon = var_259_epsilon_0_to_fp16, x = var_258_cast_fp16)[name = tensor<string, []>("op_259_cast_fp16")];
|
172 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_9_cast_fp16 = mul(x = x_15_cast_fp16, y = var_259_cast_fp16)[name = tensor<string, []>("x_normed_9_cast_fp16")];
|
173 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303668800)))];
|
174 |
+
tensor<fp16, [1, 4096, 1, 64]> x_19_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
|
175 |
+
tensor<int32, [2]> var_274 = const()[name = tensor<string, []>("op_274"), val = tensor<int32, [2]>([1, 1])];
|
176 |
+
tensor<int32, [2]> var_276 = const()[name = tensor<string, []>("op_276"), val = tensor<int32, [2]>([1, 1])];
|
177 |
+
tensor<string, []> var_278_pad_type_0 = const()[name = tensor<string, []>("op_278_pad_type_0"), val = tensor<string, []>("custom")];
|
178 |
+
tensor<int32, [4]> var_278_pad_0 = const()[name = tensor<string, []>("op_278_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
179 |
+
tensor<fp16, [1, 4096, 1, 64]> var_278_cast_fp16 = conv(dilations = var_276, groups = var_246, pad = var_278_pad_0, pad_type = var_278_pad_type_0, strides = var_274, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_278_cast_fp16")];
|
180 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303677056)))];
|
181 |
+
tensor<fp16, [1, 4096, 1, 64]> q_7_cast_fp16 = mul(x = var_278_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
|
182 |
+
tensor<int32, [2]> var_282 = const()[name = tensor<string, []>("op_282"), val = tensor<int32, [2]>([1, 1])];
|
183 |
+
tensor<int32, [2]> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, [2]>([1, 1])];
|
184 |
+
tensor<string, []> var_286_pad_type_0 = const()[name = tensor<string, []>("op_286_pad_type_0"), val = tensor<string, []>("custom")];
|
185 |
+
tensor<int32, [4]> var_286_pad_0 = const()[name = tensor<string, []>("op_286_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
186 |
+
tensor<fp16, [1, 4096, 1, 64]> var_286_cast_fp16 = conv(dilations = var_284, groups = var_246, pad = var_286_pad_0, pad_type = var_286_pad_type_0, strides = var_282, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_286_cast_fp16")];
|
187 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303685312)))];
|
188 |
+
tensor<fp16, [1, 4096, 1, 64]> k_9_cast_fp16 = mul(x = var_286_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_9_cast_fp16")];
|
189 |
+
tensor<int32, [2]> var_290 = const()[name = tensor<string, []>("op_290"), val = tensor<int32, [2]>([1, 1])];
|
190 |
+
tensor<int32, [2]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [2]>([1, 1])];
|
191 |
+
tensor<string, []> var_294_pad_type_0 = const()[name = tensor<string, []>("op_294_pad_type_0"), val = tensor<string, []>("custom")];
|
192 |
+
tensor<int32, [4]> var_294_pad_0 = const()[name = tensor<string, []>("op_294_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
193 |
+
tensor<fp16, [1, 4096, 1, 64]> var_294_cast_fp16 = conv(dilations = var_292, groups = var_246, pad = var_294_pad_0, pad_type = var_294_pad_type_0, strides = var_290, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_294_cast_fp16")];
|
194 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303693568)))];
|
195 |
+
tensor<fp16, [1, 4096, 1, 64]> v_7_cast_fp16 = mul(x = var_294_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_7_cast_fp16")];
|
196 |
+
tensor<int32, [4]> var_296 = const()[name = tensor<string, []>("op_296"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
197 |
+
tensor<fp16, [1, 32, 128, 64]> q_9_cast_fp16 = reshape(shape = var_296, x = q_7_cast_fp16)[name = tensor<string, []>("q_9_cast_fp16")];
|
198 |
+
tensor<int32, [4]> var_298 = const()[name = tensor<string, []>("op_298"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
199 |
+
tensor<fp16, [1, 32, 128, 64]> k_11_cast_fp16 = reshape(shape = var_298, x = k_9_cast_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
|
200 |
+
tensor<int32, [4]> var_300 = const()[name = tensor<string, []>("op_300"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
201 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_1 = reshape(shape = var_300, x = v_7_cast_fp16)[name = tensor<string, []>("v_9_cast_fp16")];
|
202 |
+
tensor<int32, [4]> var_312_begin_0 = const()[name = tensor<string, []>("op_312_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
203 |
+
tensor<int32, [4]> var_312_end_0 = const()[name = tensor<string, []>("op_312_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
204 |
+
tensor<bool, [4]> var_312_end_mask_0 = const()[name = tensor<string, []>("op_312_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
205 |
+
tensor<fp16, [1, 32, 64, 64]> var_312_cast_fp16 = slice_by_index(begin = var_312_begin_0, end = var_312_end_0, end_mask = var_312_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_312_cast_fp16")];
|
206 |
+
tensor<int32, [4]> var_318_begin_0 = const()[name = tensor<string, []>("op_318_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
207 |
+
tensor<int32, [4]> var_318_end_0 = const()[name = tensor<string, []>("op_318_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
208 |
+
tensor<bool, [4]> var_318_end_mask_0 = const()[name = tensor<string, []>("op_318_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
209 |
+
tensor<fp16, [1, 32, 64, 64]> var_318_cast_fp16 = slice_by_index(begin = var_318_begin_0, end = var_318_end_0, end_mask = var_318_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_318_cast_fp16")];
|
210 |
+
tensor<fp16, []> const_10_promoted_to_fp16 = const()[name = tensor<string, []>("const_10_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
211 |
+
tensor<fp16, [1, 32, 64, 64]> var_320_cast_fp16 = mul(x = var_318_cast_fp16, y = const_10_promoted_to_fp16)[name = tensor<string, []>("op_320_cast_fp16")];
|
212 |
+
tensor<bool, []> rotated_5_interleave_0 = const()[name = tensor<string, []>("rotated_5_interleave_0"), val = tensor<bool, []>(false)];
|
213 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_5_cast_fp16 = concat(axis = var_237, interleave = rotated_5_interleave_0, values = (var_320_cast_fp16, var_312_cast_fp16))[name = tensor<string, []>("rotated_5_cast_fp16")];
|
214 |
+
tensor<fp16, [1, 32, 128, 64]> var_323_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = tensor<string, []>("op_323_cast_fp16")];
|
215 |
+
tensor<fp16, [1, 32, 128, 64]> var_324_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = tensor<string, []>("op_324_cast_fp16")];
|
216 |
+
tensor<fp16, [1, 32, 128, 64]> roped_5_cast_fp16 = add(x = var_323_cast_fp16, y = var_324_cast_fp16)[name = tensor<string, []>("roped_5_cast_fp16")];
|
217 |
+
tensor<int32, [4]> var_337_begin_0 = const()[name = tensor<string, []>("op_337_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
218 |
+
tensor<int32, [4]> var_337_end_0 = const()[name = tensor<string, []>("op_337_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
219 |
+
tensor<bool, [4]> var_337_end_mask_0 = const()[name = tensor<string, []>("op_337_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
220 |
+
tensor<fp16, [1, 32, 64, 64]> var_337_cast_fp16 = slice_by_index(begin = var_337_begin_0, end = var_337_end_0, end_mask = var_337_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_337_cast_fp16")];
|
221 |
+
tensor<int32, [4]> var_343_begin_0 = const()[name = tensor<string, []>("op_343_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
222 |
+
tensor<int32, [4]> var_343_end_0 = const()[name = tensor<string, []>("op_343_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
223 |
+
tensor<bool, [4]> var_343_end_mask_0 = const()[name = tensor<string, []>("op_343_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
224 |
+
tensor<fp16, [1, 32, 64, 64]> var_343_cast_fp16 = slice_by_index(begin = var_343_begin_0, end = var_343_end_0, end_mask = var_343_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_343_cast_fp16")];
|
225 |
+
tensor<fp16, []> const_12_promoted_to_fp16 = const()[name = tensor<string, []>("const_12_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
226 |
+
tensor<fp16, [1, 32, 64, 64]> var_345_cast_fp16 = mul(x = var_343_cast_fp16, y = const_12_promoted_to_fp16)[name = tensor<string, []>("op_345_cast_fp16")];
|
227 |
+
tensor<bool, []> rotated_7_interleave_0 = const()[name = tensor<string, []>("rotated_7_interleave_0"), val = tensor<bool, []>(false)];
|
228 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_7_cast_fp16 = concat(axis = var_237, interleave = rotated_7_interleave_0, values = (var_345_cast_fp16, var_337_cast_fp16))[name = tensor<string, []>("rotated_7_cast_fp16")];
|
229 |
+
tensor<fp16, [1, 32, 128, 64]> var_348_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = tensor<string, []>("op_348_cast_fp16")];
|
230 |
+
tensor<fp16, [1, 32, 128, 64]> var_349_cast_fp16 = mul(x = rotated_7_cast_fp16, y = sin)[name = tensor<string, []>("op_349_cast_fp16")];
|
231 |
+
tensor<fp16, [1, 32, 128, 64]> roped_7_cast_fp16 = add(x = var_348_cast_fp16, y = var_349_cast_fp16)[name = tensor<string, []>("roped_7_cast_fp16")];
|
232 |
+
tensor<bool, []> q_11_interleave_0 = const()[name = tensor<string, []>("q_11_interleave_0"), val = tensor<bool, []>(false)];
|
233 |
+
tensor<fp16, [1, 32, 128, 64]> q_11_cast_fp16 = concat(axis = var_237, interleave = q_11_interleave_0, values = roped_5_cast_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
|
234 |
+
tensor<bool, []> k_13_interleave_0 = const()[name = tensor<string, []>("k_13_interleave_0"), val = tensor<bool, []>(false)];
|
235 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_1 = concat(axis = var_237, interleave = k_13_interleave_0, values = roped_7_cast_fp16)[name = tensor<string, []>("k_13_cast_fp16")];
|
236 |
+
tensor<bool, []> k_15_interleave_0 = const()[name = tensor<string, []>("k_15_interleave_0"), val = tensor<bool, []>(false)];
|
237 |
+
tensor<fp16, [1, 32, 128, 512]> k_15_cast_fp16 = concat(axis = var_239, interleave = k_15_interleave_0, values = (k_cache_1, new_k_cache_1))[name = tensor<string, []>("k_15_cast_fp16")];
|
238 |
+
tensor<bool, []> v_11_interleave_0 = const()[name = tensor<string, []>("v_11_interleave_0"), val = tensor<bool, []>(false)];
|
239 |
+
tensor<fp16, [1, 32, 128, 512]> v_11_cast_fp16 = concat(axis = var_239, interleave = v_11_interleave_0, values = (v_cache_1, new_v_cache_1))[name = tensor<string, []>("v_11_cast_fp16")];
|
240 |
+
tensor<fp16, []> var_371_to_fp16 = const()[name = tensor<string, []>("op_371_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
241 |
+
tensor<fp16, [1, 32, 128, 64]> var_372_cast_fp16 = mul(x = q_11_cast_fp16, y = var_371_to_fp16)[name = tensor<string, []>("op_372_cast_fp16")];
|
242 |
+
tensor<bool, []> attn_weights_5_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_x_0"), val = tensor<bool, []>(true)];
|
243 |
+
tensor<bool, []> attn_weights_5_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_y_0"), val = tensor<bool, []>(false)];
|
244 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_5_cast_fp16 = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_372_cast_fp16, y = k_15_cast_fp16)[name = tensor<string, []>("attn_weights_5_cast_fp16")];
|
245 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_7_cast_fp16 = add(x = attn_weights_5_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_7_cast_fp16")];
|
246 |
+
tensor<fp16, [1, 32, 64, 512]> var_380_cast_fp16 = softmax(axis = var_232, x = attn_weights_7_cast_fp16)[name = tensor<string, []>("op_380_cast_fp16")];
|
247 |
+
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
|
248 |
+
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
|
249 |
+
tensor<fp16, [1, 32, 128, 64]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = v_11_cast_fp16, y = var_380_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
|
250 |
+
tensor<int32, [4]> var_384 = const()[name = tensor<string, []>("op_384"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
251 |
+
tensor<fp16, [1, 4096, 1, 64]> input_9_cast_fp16 = reshape(shape = var_384, x = attn_3_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
|
252 |
+
tensor<int32, [2]> var_388 = const()[name = tensor<string, []>("op_388"), val = tensor<int32, [2]>([1, 1])];
|
253 |
+
tensor<int32, [2]> var_390 = const()[name = tensor<string, []>("op_390"), val = tensor<int32, [2]>([1, 1])];
|
254 |
+
tensor<string, []> var_392_pad_type_0 = const()[name = tensor<string, []>("op_392_pad_type_0"), val = tensor<string, []>("custom")];
|
255 |
+
tensor<int32, [4]> var_392_pad_0 = const()[name = tensor<string, []>("op_392_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
256 |
+
tensor<fp16, [1, 4096, 1, 64]> var_392_cast_fp16 = conv(dilations = var_390, groups = var_246, pad = var_392_pad_0, pad_type = var_392_pad_type_0, strides = var_388, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("op_392_cast_fp16")];
|
257 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303701824)))];
|
258 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_3_cast_fp16 = mul(x = var_392_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_3_cast_fp16")];
|
259 |
+
tensor<fp16, [1, 4096, 1, 64]> x_25_cast_fp16 = add(x = attention_output_3_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
|
260 |
+
tensor<fp16, [1, 4096, 1, 64]> var_401_cast_fp16 = mul(x = x_25_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_401_cast_fp16")];
|
261 |
+
tensor<int32, [1]> var_402 = const()[name = tensor<string, []>("op_402"), val = tensor<int32, [1]>([1])];
|
262 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_7_cast_fp16 = reduce_mean(axes = var_402, keep_dims = var_247, x = var_401_cast_fp16)[name = tensor<string, []>("norm_x_7_cast_fp16")];
|
263 |
+
tensor<fp16, []> var_404_to_fp16 = const()[name = tensor<string, []>("op_404_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
264 |
+
tensor<fp16, [1, 1, 1, 64]> var_405_cast_fp16 = add(x = norm_x_7_cast_fp16, y = var_404_to_fp16)[name = tensor<string, []>("op_405_cast_fp16")];
|
265 |
+
tensor<fp16, []> var_406_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_406_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
266 |
+
tensor<fp16, [1, 1, 1, 64]> var_406_cast_fp16 = rsqrt(epsilon = var_406_epsilon_0_to_fp16, x = var_405_cast_fp16)[name = tensor<string, []>("op_406_cast_fp16")];
|
267 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_13_cast_fp16 = mul(x = x_25_cast_fp16, y = var_406_cast_fp16)[name = tensor<string, []>("x_normed_13_cast_fp16")];
|
268 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303710080)))];
|
269 |
+
tensor<fp16, [1, 4096, 1, 64]> input_11_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
270 |
+
tensor<int32, [2]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [2]>([1, 1])];
|
271 |
+
tensor<int32, [2]> var_420 = const()[name = tensor<string, []>("op_420"), val = tensor<int32, [2]>([1, 1])];
|
272 |
+
tensor<string, []> var_422_pad_type_0 = const()[name = tensor<string, []>("op_422_pad_type_0"), val = tensor<string, []>("custom")];
|
273 |
+
tensor<int32, [4]> var_422_pad_0 = const()[name = tensor<string, []>("op_422_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
274 |
+
tensor<fp16, [1, 11008, 1, 64]> var_422_cast_fp16 = conv(dilations = var_420, groups = var_246, pad = var_422_pad_0, pad_type = var_422_pad_type_0, strides = var_418, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_422_cast_fp16")];
|
275 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303718336)))];
|
276 |
+
tensor<fp16, [1, 11008, 1, 64]> input_13_cast_fp16 = mul(x = var_422_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
277 |
+
tensor<int32, [2]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [2]>([1, 1])];
|
278 |
+
tensor<int32, [2]> var_428 = const()[name = tensor<string, []>("op_428"), val = tensor<int32, [2]>([1, 1])];
|
279 |
+
tensor<string, []> var_430_pad_type_0 = const()[name = tensor<string, []>("op_430_pad_type_0"), val = tensor<string, []>("custom")];
|
280 |
+
tensor<int32, [4]> var_430_pad_0 = const()[name = tensor<string, []>("op_430_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
281 |
+
tensor<fp16, [1, 11008, 1, 64]> var_430_cast_fp16 = conv(dilations = var_428, groups = var_246, pad = var_430_pad_0, pad_type = var_430_pad_type_0, strides = var_426, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_430_cast_fp16")];
|
282 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303740416)))];
|
283 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_3_cast_fp16 = mul(x = var_430_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_3_cast_fp16")];
|
284 |
+
tensor<fp16, [1, 11008, 1, 64]> var_432_cast_fp16 = silu(x = input_13_cast_fp16)[name = tensor<string, []>("op_432_cast_fp16")];
|
285 |
+
tensor<fp16, [1, 11008, 1, 64]> input_15_cast_fp16 = mul(x = var_432_cast_fp16, y = x_fc_2_3_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
|
286 |
+
tensor<int32, [2]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [2]>([1, 1])];
|
287 |
+
tensor<int32, [2]> var_438 = const()[name = tensor<string, []>("op_438"), val = tensor<int32, [2]>([1, 1])];
|
288 |
+
tensor<string, []> var_440_pad_type_0 = const()[name = tensor<string, []>("op_440_pad_type_0"), val = tensor<string, []>("custom")];
|
289 |
+
tensor<int32, [4]> var_440_pad_0 = const()[name = tensor<string, []>("op_440_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
290 |
+
tensor<fp16, [1, 4096, 1, 64]> var_440_cast_fp16 = conv(dilations = var_438, groups = var_246, pad = var_440_pad_0, pad_type = var_440_pad_type_0, strides = var_436, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("op_440_cast_fp16")];
|
291 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303762496)))];
|
292 |
+
tensor<fp16, [1, 4096, 1, 64]> var_441_cast_fp16 = mul(x = var_440_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
|
293 |
+
tensor<fp16, [1, 4096, 1, 64]> x_29_cast_fp16 = add(x = var_441_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
|
294 |
+
tensor<int32, []> var_448 = const()[name = tensor<string, []>("op_448"), val = tensor<int32, []>(3)];
|
295 |
+
tensor<int32, []> var_453 = const()[name = tensor<string, []>("op_453"), val = tensor<int32, []>(-2)];
|
296 |
+
tensor<int32, []> var_455 = const()[name = tensor<string, []>("op_455"), val = tensor<int32, []>(-1)];
|
297 |
+
tensor<int32, []> var_462 = const()[name = tensor<string, []>("op_462"), val = tensor<int32, []>(1)];
|
298 |
+
tensor<bool, []> var_463 = const()[name = tensor<string, []>("op_463"), val = tensor<bool, []>(true)];
|
299 |
+
tensor<fp16, [1, 4096, 1, 64]> var_470_cast_fp16 = mul(x = x_29_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("op_470_cast_fp16")];
|
300 |
+
tensor<int32, [1]> var_471 = const()[name = tensor<string, []>("op_471"), val = tensor<int32, [1]>([1])];
|
301 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_9_cast_fp16 = reduce_mean(axes = var_471, keep_dims = var_463, x = var_470_cast_fp16)[name = tensor<string, []>("norm_x_9_cast_fp16")];
|
302 |
+
tensor<fp16, []> var_473_to_fp16 = const()[name = tensor<string, []>("op_473_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
303 |
+
tensor<fp16, [1, 1, 1, 64]> var_474_cast_fp16 = add(x = norm_x_9_cast_fp16, y = var_473_to_fp16)[name = tensor<string, []>("op_474_cast_fp16")];
|
304 |
+
tensor<fp16, []> var_475_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_475_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
305 |
+
tensor<fp16, [1, 1, 1, 64]> var_475_cast_fp16 = rsqrt(epsilon = var_475_epsilon_0_to_fp16, x = var_474_cast_fp16)[name = tensor<string, []>("op_475_cast_fp16")];
|
306 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_17_cast_fp16 = mul(x = x_29_cast_fp16, y = var_475_cast_fp16)[name = tensor<string, []>("x_normed_17_cast_fp16")];
|
307 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303770752)))];
|
308 |
+
tensor<fp16, [1, 4096, 1, 64]> x_33_cast_fp16 = mul(x = x_normed_17_cast_fp16, y = blocks_2_norm_1_weight_to_fp16)[name = tensor<string, []>("x_33_cast_fp16")];
|
309 |
+
tensor<int32, [2]> var_490 = const()[name = tensor<string, []>("op_490"), val = tensor<int32, [2]>([1, 1])];
|
310 |
+
tensor<int32, [2]> var_492 = const()[name = tensor<string, []>("op_492"), val = tensor<int32, [2]>([1, 1])];
|
311 |
+
tensor<string, []> var_494_pad_type_0 = const()[name = tensor<string, []>("op_494_pad_type_0"), val = tensor<string, []>("custom")];
|
312 |
+
tensor<int32, [4]> var_494_pad_0 = const()[name = tensor<string, []>("op_494_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
313 |
+
tensor<fp16, [1, 4096, 1, 64]> var_494_cast_fp16 = conv(dilations = var_492, groups = var_462, pad = var_494_pad_0, pad_type = var_494_pad_type_0, strides = var_490, weight = blocks_2_attn_q_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_494_cast_fp16")];
|
314 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303779008)))];
|
315 |
+
tensor<fp16, [1, 4096, 1, 64]> q_13_cast_fp16 = mul(x = var_494_cast_fp16, y = blocks_2_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_13_cast_fp16")];
|
316 |
+
tensor<int32, [2]> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, [2]>([1, 1])];
|
317 |
+
tensor<int32, [2]> var_500 = const()[name = tensor<string, []>("op_500"), val = tensor<int32, [2]>([1, 1])];
|
318 |
+
tensor<string, []> var_502_pad_type_0 = const()[name = tensor<string, []>("op_502_pad_type_0"), val = tensor<string, []>("custom")];
|
319 |
+
tensor<int32, [4]> var_502_pad_0 = const()[name = tensor<string, []>("op_502_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
320 |
+
tensor<fp16, [1, 4096, 1, 64]> var_502_cast_fp16 = conv(dilations = var_500, groups = var_462, pad = var_502_pad_0, pad_type = var_502_pad_type_0, strides = var_498, weight = blocks_2_attn_k_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_502_cast_fp16")];
|
321 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303787264)))];
|
322 |
+
tensor<fp16, [1, 4096, 1, 64]> k_17_cast_fp16 = mul(x = var_502_cast_fp16, y = blocks_2_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_17_cast_fp16")];
|
323 |
+
tensor<int32, [2]> var_506 = const()[name = tensor<string, []>("op_506"), val = tensor<int32, [2]>([1, 1])];
|
324 |
+
tensor<int32, [2]> var_508 = const()[name = tensor<string, []>("op_508"), val = tensor<int32, [2]>([1, 1])];
|
325 |
+
tensor<string, []> var_510_pad_type_0 = const()[name = tensor<string, []>("op_510_pad_type_0"), val = tensor<string, []>("custom")];
|
326 |
+
tensor<int32, [4]> var_510_pad_0 = const()[name = tensor<string, []>("op_510_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
327 |
+
tensor<fp16, [1, 4096, 1, 64]> var_510_cast_fp16 = conv(dilations = var_508, groups = var_462, pad = var_510_pad_0, pad_type = var_510_pad_type_0, strides = var_506, weight = blocks_2_attn_v_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_510_cast_fp16")];
|
328 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303795520)))];
|
329 |
+
tensor<fp16, [1, 4096, 1, 64]> v_13_cast_fp16 = mul(x = var_510_cast_fp16, y = blocks_2_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_13_cast_fp16")];
|
330 |
+
tensor<int32, [4]> var_512 = const()[name = tensor<string, []>("op_512"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
331 |
+
tensor<fp16, [1, 32, 128, 64]> q_15_cast_fp16 = reshape(shape = var_512, x = q_13_cast_fp16)[name = tensor<string, []>("q_15_cast_fp16")];
|
332 |
+
tensor<int32, [4]> var_514 = const()[name = tensor<string, []>("op_514"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
333 |
+
tensor<fp16, [1, 32, 128, 64]> k_19_cast_fp16 = reshape(shape = var_514, x = k_17_cast_fp16)[name = tensor<string, []>("k_19_cast_fp16")];
|
334 |
+
tensor<int32, [4]> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
335 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_2 = reshape(shape = var_516, x = v_13_cast_fp16)[name = tensor<string, []>("v_15_cast_fp16")];
|
336 |
+
tensor<int32, [4]> var_528_begin_0 = const()[name = tensor<string, []>("op_528_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
337 |
+
tensor<int32, [4]> var_528_end_0 = const()[name = tensor<string, []>("op_528_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
338 |
+
tensor<bool, [4]> var_528_end_mask_0 = const()[name = tensor<string, []>("op_528_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
339 |
+
tensor<fp16, [1, 32, 64, 64]> var_528_cast_fp16 = slice_by_index(begin = var_528_begin_0, end = var_528_end_0, end_mask = var_528_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_528_cast_fp16")];
|
340 |
+
tensor<int32, [4]> var_534_begin_0 = const()[name = tensor<string, []>("op_534_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
341 |
+
tensor<int32, [4]> var_534_end_0 = const()[name = tensor<string, []>("op_534_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
342 |
+
tensor<bool, [4]> var_534_end_mask_0 = const()[name = tensor<string, []>("op_534_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
343 |
+
tensor<fp16, [1, 32, 64, 64]> var_534_cast_fp16 = slice_by_index(begin = var_534_begin_0, end = var_534_end_0, end_mask = var_534_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_534_cast_fp16")];
|
344 |
+
tensor<fp16, []> const_17_promoted_to_fp16 = const()[name = tensor<string, []>("const_17_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
345 |
+
tensor<fp16, [1, 32, 64, 64]> var_536_cast_fp16 = mul(x = var_534_cast_fp16, y = const_17_promoted_to_fp16)[name = tensor<string, []>("op_536_cast_fp16")];
|
346 |
+
tensor<bool, []> rotated_9_interleave_0 = const()[name = tensor<string, []>("rotated_9_interleave_0"), val = tensor<bool, []>(false)];
|
347 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_9_cast_fp16 = concat(axis = var_453, interleave = rotated_9_interleave_0, values = (var_536_cast_fp16, var_528_cast_fp16))[name = tensor<string, []>("rotated_9_cast_fp16")];
|
348 |
+
tensor<fp16, [1, 32, 128, 64]> var_539_cast_fp16 = mul(x = q_15_cast_fp16, y = cos)[name = tensor<string, []>("op_539_cast_fp16")];
|
349 |
+
tensor<fp16, [1, 32, 128, 64]> var_540_cast_fp16 = mul(x = rotated_9_cast_fp16, y = sin)[name = tensor<string, []>("op_540_cast_fp16")];
|
350 |
+
tensor<fp16, [1, 32, 128, 64]> roped_9_cast_fp16 = add(x = var_539_cast_fp16, y = var_540_cast_fp16)[name = tensor<string, []>("roped_9_cast_fp16")];
|
351 |
+
tensor<int32, [4]> var_553_begin_0 = const()[name = tensor<string, []>("op_553_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
352 |
+
tensor<int32, [4]> var_553_end_0 = const()[name = tensor<string, []>("op_553_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
353 |
+
tensor<bool, [4]> var_553_end_mask_0 = const()[name = tensor<string, []>("op_553_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
354 |
+
tensor<fp16, [1, 32, 64, 64]> var_553_cast_fp16 = slice_by_index(begin = var_553_begin_0, end = var_553_end_0, end_mask = var_553_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_553_cast_fp16")];
|
355 |
+
tensor<int32, [4]> var_559_begin_0 = const()[name = tensor<string, []>("op_559_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
356 |
+
tensor<int32, [4]> var_559_end_0 = const()[name = tensor<string, []>("op_559_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
357 |
+
tensor<bool, [4]> var_559_end_mask_0 = const()[name = tensor<string, []>("op_559_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
358 |
+
tensor<fp16, [1, 32, 64, 64]> var_559_cast_fp16 = slice_by_index(begin = var_559_begin_0, end = var_559_end_0, end_mask = var_559_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_559_cast_fp16")];
|
359 |
+
tensor<fp16, []> const_19_promoted_to_fp16 = const()[name = tensor<string, []>("const_19_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
360 |
+
tensor<fp16, [1, 32, 64, 64]> var_561_cast_fp16 = mul(x = var_559_cast_fp16, y = const_19_promoted_to_fp16)[name = tensor<string, []>("op_561_cast_fp16")];
|
361 |
+
tensor<bool, []> rotated_interleave_0 = const()[name = tensor<string, []>("rotated_interleave_0"), val = tensor<bool, []>(false)];
|
362 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_cast_fp16 = concat(axis = var_453, interleave = rotated_interleave_0, values = (var_561_cast_fp16, var_553_cast_fp16))[name = tensor<string, []>("rotated_cast_fp16")];
|
363 |
+
tensor<fp16, [1, 32, 128, 64]> var_564_cast_fp16 = mul(x = k_19_cast_fp16, y = cos)[name = tensor<string, []>("op_564_cast_fp16")];
|
364 |
+
tensor<fp16, [1, 32, 128, 64]> var_565_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = tensor<string, []>("op_565_cast_fp16")];
|
365 |
+
tensor<fp16, [1, 32, 128, 64]> roped_cast_fp16 = add(x = var_564_cast_fp16, y = var_565_cast_fp16)[name = tensor<string, []>("roped_cast_fp16")];
|
366 |
+
tensor<bool, []> q_interleave_0 = const()[name = tensor<string, []>("q_interleave_0"), val = tensor<bool, []>(false)];
|
367 |
+
tensor<fp16, [1, 32, 128, 64]> q_cast_fp16 = concat(axis = var_453, interleave = q_interleave_0, values = roped_9_cast_fp16)[name = tensor<string, []>("q_cast_fp16")];
|
368 |
+
tensor<bool, []> k_21_interleave_0 = const()[name = tensor<string, []>("k_21_interleave_0"), val = tensor<bool, []>(false)];
|
369 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_2 = concat(axis = var_453, interleave = k_21_interleave_0, values = roped_cast_fp16)[name = tensor<string, []>("k_21_cast_fp16")];
|
370 |
+
tensor<bool, []> k_interleave_0 = const()[name = tensor<string, []>("k_interleave_0"), val = tensor<bool, []>(false)];
|
371 |
+
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_455, interleave = k_interleave_0, values = (k_cache_2, new_k_cache_2))[name = tensor<string, []>("k_cast_fp16")];
|
372 |
+
tensor<bool, []> v_interleave_0 = const()[name = tensor<string, []>("v_interleave_0"), val = tensor<bool, []>(false)];
|
373 |
+
tensor<fp16, [1, 32, 128, 512]> v_cast_fp16 = concat(axis = var_455, interleave = v_interleave_0, values = (v_cache_2, new_v_cache_2))[name = tensor<string, []>("v_cast_fp16")];
|
374 |
+
tensor<fp16, []> var_587_to_fp16 = const()[name = tensor<string, []>("op_587_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
375 |
+
tensor<fp16, [1, 32, 128, 64]> var_588_cast_fp16 = mul(x = q_cast_fp16, y = var_587_to_fp16)[name = tensor<string, []>("op_588_cast_fp16")];
|
376 |
+
tensor<bool, []> attn_weights_9_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_x_0"), val = tensor<bool, []>(true)];
|
377 |
+
tensor<bool, []> attn_weights_9_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_y_0"), val = tensor<bool, []>(false)];
|
378 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_9_cast_fp16 = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_588_cast_fp16, y = k_cast_fp16)[name = tensor<string, []>("attn_weights_9_cast_fp16")];
|
379 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_cast_fp16 = add(x = attn_weights_9_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_cast_fp16")];
|
380 |
+
tensor<fp16, [1, 32, 64, 512]> var_596_cast_fp16 = softmax(axis = var_448, x = attn_weights_cast_fp16)[name = tensor<string, []>("op_596_cast_fp16")];
|
381 |
+
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
|
382 |
+
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
|
383 |
+
tensor<fp16, [1, 32, 128, 64]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = v_cast_fp16, y = var_596_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
|
384 |
+
tensor<int32, [4]> var_600 = const()[name = tensor<string, []>("op_600"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
385 |
+
tensor<fp16, [1, 4096, 1, 64]> input_17_cast_fp16 = reshape(shape = var_600, x = attn_5_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
|
386 |
+
tensor<int32, [2]> var_604 = const()[name = tensor<string, []>("op_604"), val = tensor<int32, [2]>([1, 1])];
|
387 |
+
tensor<int32, [2]> var_606 = const()[name = tensor<string, []>("op_606"), val = tensor<int32, [2]>([1, 1])];
|
388 |
+
tensor<string, []> var_608_pad_type_0 = const()[name = tensor<string, []>("op_608_pad_type_0"), val = tensor<string, []>("custom")];
|
389 |
+
tensor<int32, [4]> var_608_pad_0 = const()[name = tensor<string, []>("op_608_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
390 |
+
tensor<fp16, [1, 4096, 1, 64]> var_608_cast_fp16 = conv(dilations = var_606, groups = var_462, pad = var_608_pad_0, pad_type = var_608_pad_type_0, strides = var_604, weight = blocks_2_attn_proj_weight_palettized_cast_fp16, x = input_17_cast_fp16)[name = tensor<string, []>("op_608_cast_fp16")];
|
391 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303803776)))];
|
392 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_cast_fp16 = mul(x = var_608_cast_fp16, y = blocks_2_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_cast_fp16")];
|
393 |
+
tensor<fp16, [1, 4096, 1, 64]> x_39_cast_fp16 = add(x = attention_output_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("x_39_cast_fp16")];
|
394 |
+
tensor<fp16, [1, 4096, 1, 64]> var_617_cast_fp16 = mul(x = x_39_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")];
|
395 |
+
tensor<int32, [1]> var_618 = const()[name = tensor<string, []>("op_618"), val = tensor<int32, [1]>([1])];
|
396 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_618, keep_dims = var_463, x = var_617_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
397 |
+
tensor<fp16, []> var_620_to_fp16 = const()[name = tensor<string, []>("op_620_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
398 |
+
tensor<fp16, [1, 1, 1, 64]> var_621_cast_fp16 = add(x = norm_x_cast_fp16, y = var_620_to_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
399 |
+
tensor<fp16, []> var_622_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_622_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
400 |
+
tensor<fp16, [1, 1, 1, 64]> var_622_cast_fp16 = rsqrt(epsilon = var_622_epsilon_0_to_fp16, x = var_621_cast_fp16)[name = tensor<string, []>("op_622_cast_fp16")];
|
401 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_21_cast_fp16 = mul(x = x_39_cast_fp16, y = var_622_cast_fp16)[name = tensor<string, []>("x_normed_21_cast_fp16")];
|
402 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303812032)))];
|
403 |
+
tensor<fp16, [1, 4096, 1, 64]> input_19_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_2_norm_2_weight_to_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
404 |
+
tensor<int32, [2]> var_634 = const()[name = tensor<string, []>("op_634"), val = tensor<int32, [2]>([1, 1])];
|
405 |
+
tensor<int32, [2]> var_636 = const()[name = tensor<string, []>("op_636"), val = tensor<int32, [2]>([1, 1])];
|
406 |
+
tensor<string, []> var_638_pad_type_0 = const()[name = tensor<string, []>("op_638_pad_type_0"), val = tensor<string, []>("custom")];
|
407 |
+
tensor<int32, [4]> var_638_pad_0 = const()[name = tensor<string, []>("op_638_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
408 |
+
tensor<fp16, [1, 11008, 1, 64]> var_638_cast_fp16 = conv(dilations = var_636, groups = var_462, pad = var_638_pad_0, pad_type = var_638_pad_type_0, strides = var_634, weight = blocks_2_mlp_fc_1_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303820288)))];
|
410 |
+
tensor<fp16, [1, 11008, 1, 64]> input_21_cast_fp16 = mul(x = var_638_cast_fp16, y = blocks_2_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
|
411 |
+
tensor<int32, [2]> var_642 = const()[name = tensor<string, []>("op_642"), val = tensor<int32, [2]>([1, 1])];
|
412 |
+
tensor<int32, [2]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [2]>([1, 1])];
|
413 |
+
tensor<string, []> var_646_pad_type_0 = const()[name = tensor<string, []>("op_646_pad_type_0"), val = tensor<string, []>("custom")];
|
414 |
+
tensor<int32, [4]> var_646_pad_0 = const()[name = tensor<string, []>("op_646_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
415 |
+
tensor<fp16, [1, 11008, 1, 64]> var_646_cast_fp16 = conv(dilations = var_644, groups = var_462, pad = var_646_pad_0, pad_type = var_646_pad_type_0, strides = var_642, weight = blocks_2_mlp_fc_2_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_646_cast_fp16")];
|
416 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303842368)))];
|
417 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_cast_fp16 = mul(x = var_646_cast_fp16, y = blocks_2_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 11008, 1, 64]> var_648_cast_fp16 = silu(x = input_21_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
419 |
+
tensor<fp16, [1, 11008, 1, 64]> input_cast_fp16 = mul(x = var_648_cast_fp16, y = x_fc_2_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
420 |
+
tensor<int32, [2]> var_652 = const()[name = tensor<string, []>("op_652"), val = tensor<int32, [2]>([1, 1])];
|
421 |
+
tensor<int32, [2]> var_654 = const()[name = tensor<string, []>("op_654"), val = tensor<int32, [2]>([1, 1])];
|
422 |
+
tensor<string, []> var_656_pad_type_0 = const()[name = tensor<string, []>("op_656_pad_type_0"), val = tensor<string, []>("custom")];
|
423 |
+
tensor<int32, [4]> var_656_pad_0 = const()[name = tensor<string, []>("op_656_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
424 |
+
tensor<fp16, [1, 4096, 1, 64]> var_656_cast_fp16 = conv(dilations = var_654, groups = var_462, pad = var_656_pad_0, pad_type = var_656_pad_type_0, strides = var_652, weight = blocks_2_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")];
|
425 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303864448)))];
|
426 |
+
tensor<fp16, [1, 4096, 1, 64]> var_657_cast_fp16 = mul(x = var_656_cast_fp16, y = blocks_2_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_657_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 4096, 1, 64]> new_x = add(x = var_657_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
428 |
+
} -> (new_x, new_k_cache_0, new_k_cache_1, new_k_cache_2, new_v_cache_0, new_v_cache_1, new_v_cache_2);
|
429 |
+
}
|
Llama-2-7b-hf_chunk4.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2b1969a0b2372ca72340108bf7967f643d02a423cac947a5bd3608fdde48b86
+size 303872704
Llama-2-7b-hf_chunk5.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3412284b024b899a736cd77112d4b1a4a5faa19d954259e925ef429f58bd886b
+size 243
Llama-2-7b-hf_chunk5.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:589729b2995d8ca8246bbb5d92b910207bab816ad67282b0a285bcd2de77f80e
+size 791
Llama-2-7b-hf_chunk5.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,218 @@
1 |
+
[
|
2 |
+
{
|
3 |
+
"metadataOutputVersion" : "3.0",
|
4 |
+
"storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
|
5 |
+
"outputSchema" : [
|
6 |
+
{
|
7 |
+
"hasShapeFlexibility" : "0",
|
8 |
+
"isOptional" : "0",
|
9 |
+
"dataType" : "Float16",
|
10 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
11 |
+
"shortDescription" : "",
|
12 |
+
"shape" : "[1, 4096, 1, 64]",
|
13 |
+
"name" : "new_x",
|
14 |
+
"type" : "MultiArray"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"hasShapeFlexibility" : "0",
|
18 |
+
"isOptional" : "0",
|
19 |
+
"dataType" : "Float16",
|
20 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
21 |
+
"shortDescription" : "",
|
22 |
+
"shape" : "[1, 32, 128, 64]",
|
23 |
+
"name" : "new_k_cache_0",
|
24 |
+
"type" : "MultiArray"
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"hasShapeFlexibility" : "0",
|
28 |
+
"isOptional" : "0",
|
29 |
+
"dataType" : "Float16",
|
30 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
31 |
+
"shortDescription" : "",
|
32 |
+
"shape" : "[1, 32, 128, 64]",
|
33 |
+
"name" : "new_k_cache_1",
|
34 |
+
"type" : "MultiArray"
|
35 |
+
},
|
36 |
+
{
|
37 |
+
"hasShapeFlexibility" : "0",
|
38 |
+
"isOptional" : "0",
|
39 |
+
"dataType" : "Float16",
|
40 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
41 |
+
"shortDescription" : "",
|
42 |
+
"shape" : "[1, 32, 128, 64]",
|
43 |
+
"name" : "new_k_cache_2",
|
44 |
+
"type" : "MultiArray"
|
45 |
+
},
|
46 |
+
{
|
47 |
+
"hasShapeFlexibility" : "0",
|
48 |
+
"isOptional" : "0",
|
49 |
+
"dataType" : "Float16",
|
50 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
51 |
+
"shortDescription" : "",
|
52 |
+
"shape" : "[1, 32, 128, 64]",
|
53 |
+
"name" : "new_v_cache_0",
|
54 |
+
"type" : "MultiArray"
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"hasShapeFlexibility" : "0",
|
58 |
+
"isOptional" : "0",
|
59 |
+
"dataType" : "Float16",
|
60 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
61 |
+
"shortDescription" : "",
|
62 |
+
"shape" : "[1, 32, 128, 64]",
|
63 |
+
"name" : "new_v_cache_1",
|
64 |
+
"type" : "MultiArray"
|
65 |
+
},
|
66 |
+
{
|
67 |
+
"hasShapeFlexibility" : "0",
|
68 |
+
"isOptional" : "0",
|
69 |
+
"dataType" : "Float16",
|
70 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
71 |
+
"shortDescription" : "",
|
72 |
+
"shape" : "[1, 32, 128, 64]",
|
73 |
+
"name" : "new_v_cache_2",
|
74 |
+
"type" : "MultiArray"
|
75 |
+
}
|
76 |
+
],
|
77 |
+
"modelParameters" : [
|
78 |
+
|
79 |
+
],
|
80 |
+
"specificationVersion" : 7,
|
81 |
+
"mlProgramOperationTypeHistogram" : {
|
82 |
+
"Concat" : 18,
|
83 |
+
"Ios16.rsqrt" : 6,
|
84 |
+
"Ios16.mul" : 63,
|
85 |
+
"SliceByIndex" : 12,
|
86 |
+
"Ios16.constexprLutToDense" : 21,
|
87 |
+
"Ios16.conv" : 21,
|
88 |
+
"Ios16.add" : 21,
|
89 |
+
"Ios16.reduceMean" : 6,
|
90 |
+
"Ios16.matmul" : 6,
|
91 |
+
"Ios16.softmax" : 3,
|
92 |
+
"Ios16.reshape" : 12,
|
93 |
+
"Ios16.silu" : 3
|
94 |
+
},
|
95 |
+
"computePrecision" : "Mixed (Float16, Int32)",
|
96 |
+
"isUpdatable" : "0",
|
97 |
+
"availability" : {
|
98 |
+
"macOS" : "13.0",
|
99 |
+
"tvOS" : "16.0",
|
100 |
+
"visionOS" : "1.0",
|
101 |
+
"watchOS" : "9.0",
|
102 |
+
"iOS" : "16.0",
|
103 |
+
"macCatalyst" : "16.0"
|
104 |
+
},
|
105 |
+
"modelType" : {
|
106 |
+
"name" : "MLModelType_mlProgram"
|
107 |
+
},
|
108 |
+
"userDefinedMetadata" : {
|
109 |
+
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
110 |
+
"com.github.apple.coremltools.source" : "torch==2.1.0",
|
111 |
+
"com.github.apple.coremltools.version" : "7.2"
|
112 |
+
},
|
113 |
+
"inputSchema" : [
|
114 |
+
{
|
115 |
+
"hasShapeFlexibility" : "0",
|
116 |
+
"isOptional" : "0",
|
117 |
+
"dataType" : "Float16",
|
118 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
119 |
+
"shortDescription" : "",
|
120 |
+
"shape" : "[1, 4096, 1, 64]",
|
121 |
+
"name" : "x",
|
122 |
+
"type" : "MultiArray"
|
123 |
+
},
|
124 |
+
{
|
125 |
+
"hasShapeFlexibility" : "0",
|
126 |
+
"isOptional" : "0",
|
127 |
+
"dataType" : "Float16",
|
128 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
129 |
+
"shortDescription" : "",
|
130 |
+
"shape" : "[128, 64]",
|
131 |
+
"name" : "cos",
|
132 |
+
"type" : "MultiArray"
|
133 |
+
},
|
134 |
+
{
|
135 |
+
"hasShapeFlexibility" : "0",
|
136 |
+
"isOptional" : "0",
|
137 |
+
"dataType" : "Float16",
|
138 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
139 |
+
"shortDescription" : "",
|
140 |
+
"shape" : "[128, 64]",
|
141 |
+
"name" : "sin",
|
142 |
+
"type" : "MultiArray"
|
143 |
+
},
|
144 |
+
{
|
145 |
+
"hasShapeFlexibility" : "0",
|
146 |
+
"isOptional" : "0",
|
147 |
+
"dataType" : "Float16",
|
148 |
+
"formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
|
149 |
+
"shortDescription" : "",
|
150 |
+
"shape" : "[1, 1, 64, 512]",
|
151 |
+
"name" : "mask",
|
152 |
+
"type" : "MultiArray"
|
153 |
+
},
|
154 |
+
{
|
155 |
+
"hasShapeFlexibility" : "0",
|
156 |
+
"isOptional" : "1",
|
157 |
+
"dataType" : "Float16",
|
158 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
159 |
+
"shortDescription" : "",
|
160 |
+
"shape" : "[1, 32, 128, 448]",
|
161 |
+
"name" : "k_cache_0",
|
162 |
+
"type" : "MultiArray"
|
163 |
+
},
|
164 |
+
{
|
165 |
+
"hasShapeFlexibility" : "0",
|
166 |
+
"isOptional" : "1",
|
167 |
+
"dataType" : "Float16",
|
168 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
169 |
+
"shortDescription" : "",
|
170 |
+
"shape" : "[1, 32, 128, 448]",
|
171 |
+
"name" : "v_cache_0",
|
172 |
+
"type" : "MultiArray"
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"hasShapeFlexibility" : "0",
|
176 |
+
"isOptional" : "1",
|
177 |
+
"dataType" : "Float16",
|
178 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
179 |
+
"shortDescription" : "",
|
180 |
+
"shape" : "[1, 32, 128, 448]",
|
181 |
+
"name" : "k_cache_1",
|
182 |
+
"type" : "MultiArray"
|
183 |
+
},
|
184 |
+
{
|
185 |
+
"hasShapeFlexibility" : "0",
|
186 |
+
"isOptional" : "1",
|
187 |
+
"dataType" : "Float16",
|
188 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
189 |
+
"shortDescription" : "",
|
190 |
+
"shape" : "[1, 32, 128, 448]",
|
191 |
+
"name" : "v_cache_1",
|
192 |
+
"type" : "MultiArray"
|
193 |
+
},
|
194 |
+
{
|
195 |
+
"hasShapeFlexibility" : "0",
|
196 |
+
"isOptional" : "1",
|
197 |
+
"dataType" : "Float16",
|
198 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
199 |
+
"shortDescription" : "",
|
200 |
+
"shape" : "[1, 32, 128, 448]",
|
201 |
+
"name" : "k_cache_2",
|
202 |
+
"type" : "MultiArray"
|
203 |
+
},
|
204 |
+
{
|
205 |
+
"hasShapeFlexibility" : "0",
|
206 |
+
"isOptional" : "1",
|
207 |
+
"dataType" : "Float16",
|
208 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
209 |
+
"shortDescription" : "",
|
210 |
+
"shape" : "[1, 32, 128, 448]",
|
211 |
+
"name" : "v_cache_2",
|
212 |
+
"type" : "MultiArray"
|
213 |
+
}
|
214 |
+
],
|
215 |
+
"generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk5",
|
216 |
+
"method" : "predict"
|
217 |
+
}
|
218 |
+
]
|
Llama-2-7b-hf_chunk5.mlmodelc/model.mil
ADDED
@@ -0,0 +1,429 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [128, 64]> cos, tensor<fp16, [1, 32, 128, 448]> k_cache_0, tensor<fp16, [1, 32, 128, 448]> k_cache_1, tensor<fp16, [1, 32, 128, 448]> k_cache_2, tensor<fp16, [1, 1, 64, 512]> mask, tensor<fp16, [128, 64]> sin, tensor<fp16, [1, 32, 128, 448]> v_cache_0, tensor<fp16, [1, 32, 128, 448]> v_cache_1, tensor<fp16, [1, 32, 128, 448]> v_cache_2, tensor<fp16, [1, 4096, 1, 64]> x) [CoreML_InputDefaultValues = dict<tensor<string, []>, tensor<fp32, []>>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"k_cache_2", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}, {"v_cache_2", 0}})] {
|
5 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388736))), name = tensor<string, []>("blocks_0_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
6 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388864))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777536))), name = tensor<string, []>("blocks_0_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
7 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777664))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166336))), name = tensor<string, []>("blocks_0_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
8 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166464))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555136))), name = tensor<string, []>("blocks_0_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
9 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555264))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099712))), name = tensor<string, []>("blocks_0_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
10 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644288))), name = tensor<string, []>("blocks_0_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
11 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644416))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188864))), name = tensor<string, []>("blocks_0_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
12 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188992))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577664))), name = tensor<string, []>("blocks_1_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
13 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577792))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966464))), name = tensor<string, []>("blocks_1_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
14 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966592))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355264))), name = tensor<string, []>("blocks_1_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
15 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355392))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744064))), name = tensor<string, []>("blocks_1_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
16 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744192))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288640))), name = tensor<string, []>("blocks_1_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
17 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288768))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833216))), name = tensor<string, []>("blocks_1_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
18 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833344))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377792))), name = tensor<string, []>("blocks_1_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
19 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377920))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766592))), name = tensor<string, []>("blocks_2_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
20 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766720))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155392))), name = tensor<string, []>("blocks_2_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
21 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155520))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544192))), name = tensor<string, []>("blocks_2_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
22 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544320))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235932992))), name = tensor<string, []>("blocks_2_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
23 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235933120))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477568))), name = tensor<string, []>("blocks_2_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
24 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477696))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022144))), name = tensor<string, []>("blocks_2_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
25 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_2_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022272))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566720))), name = tensor<string, []>("blocks_2_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
26 |
+
tensor<int32, []> var_18 = const()[name = tensor<string, []>("op_18"), val = tensor<int32, []>(3)];
|
27 |
+
tensor<int32, []> var_23 = const()[name = tensor<string, []>("op_23"), val = tensor<int32, []>(-2)];
|
28 |
+
tensor<int32, []> var_25 = const()[name = tensor<string, []>("op_25"), val = tensor<int32, []>(-1)];
|
29 |
+
tensor<int32, []> var_32 = const()[name = tensor<string, []>("op_32"), val = tensor<int32, []>(1)];
|
30 |
+
tensor<bool, []> var_33 = const()[name = tensor<string, []>("op_33"), val = tensor<bool, []>(true)];
|
31 |
+
tensor<fp16, [1, 4096, 1, 64]> var_41_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_41_cast_fp16")];
|
32 |
+
tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([1])];
|
33 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_1_cast_fp16 = reduce_mean(axes = var_42, keep_dims = var_33, x = var_41_cast_fp16)[name = tensor<string, []>("norm_x_1_cast_fp16")];
|
34 |
+
tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
35 |
+
tensor<fp16, [1, 1, 1, 64]> var_45_cast_fp16 = add(x = norm_x_1_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
|
36 |
+
tensor<fp16, []> var_46_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_46_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
37 |
+
tensor<fp16, [1, 1, 1, 64]> var_46_cast_fp16 = rsqrt(epsilon = var_46_epsilon_0_to_fp16, x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
|
38 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_46_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
|
39 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566848)))];
|
40 |
+
tensor<fp16, [1, 4096, 1, 64]> x_5_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
|
41 |
+
tensor<int32, [2]> var_58 = const()[name = tensor<string, []>("op_58"), val = tensor<int32, [2]>([1, 1])];
|
42 |
+
tensor<int32, [2]> var_60 = const()[name = tensor<string, []>("op_60"), val = tensor<int32, [2]>([1, 1])];
|
43 |
+
tensor<string, []> var_62_pad_type_0 = const()[name = tensor<string, []>("op_62_pad_type_0"), val = tensor<string, []>("custom")];
|
44 |
+
tensor<int32, [4]> var_62_pad_0 = const()[name = tensor<string, []>("op_62_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
45 |
+
tensor<fp16, [1, 4096, 1, 64]> var_62_cast_fp16 = conv(dilations = var_60, groups = var_32, pad = var_62_pad_0, pad_type = var_62_pad_type_0, strides = var_58, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
|
46 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303575104)))];
|
47 |
+
tensor<fp16, [1, 4096, 1, 64]> q_1_cast_fp16 = mul(x = var_62_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_1_cast_fp16")];
|
48 |
+
tensor<int32, [2]> var_66 = const()[name = tensor<string, []>("op_66"), val = tensor<int32, [2]>([1, 1])];
|
49 |
+
tensor<int32, [2]> var_68 = const()[name = tensor<string, []>("op_68"), val = tensor<int32, [2]>([1, 1])];
|
50 |
+
tensor<string, []> var_70_pad_type_0 = const()[name = tensor<string, []>("op_70_pad_type_0"), val = tensor<string, []>("custom")];
|
51 |
+
tensor<int32, [4]> var_70_pad_0 = const()[name = tensor<string, []>("op_70_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
52 |
+
tensor<fp16, [1, 4096, 1, 64]> var_70_cast_fp16 = conv(dilations = var_68, groups = var_32, pad = var_70_pad_0, pad_type = var_70_pad_type_0, strides = var_66, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_70_cast_fp16")];
|
53 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303583360)))];
|
54 |
+
tensor<fp16, [1, 4096, 1, 64]> k_1_cast_fp16 = mul(x = var_70_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_1_cast_fp16")];
|
55 |
+
tensor<int32, [2]> var_74 = const()[name = tensor<string, []>("op_74"), val = tensor<int32, [2]>([1, 1])];
|
56 |
+
tensor<int32, [2]> var_76 = const()[name = tensor<string, []>("op_76"), val = tensor<int32, [2]>([1, 1])];
|
57 |
+
tensor<string, []> var_78_pad_type_0 = const()[name = tensor<string, []>("op_78_pad_type_0"), val = tensor<string, []>("custom")];
|
58 |
+
tensor<int32, [4]> var_78_pad_0 = const()[name = tensor<string, []>("op_78_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
59 |
+
tensor<fp16, [1, 4096, 1, 64]> var_78_cast_fp16 = conv(dilations = var_76, groups = var_32, pad = var_78_pad_0, pad_type = var_78_pad_type_0, strides = var_74, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_78_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303591616)))];
tensor<fp16, [1, 4096, 1, 64]> v_1_cast_fp16 = mul(x = var_78_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_1_cast_fp16")];
tensor<int32, [4]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> q_3_cast_fp16 = reshape(shape = var_80, x = q_1_cast_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
tensor<int32, [4]> var_82 = const()[name = tensor<string, []>("op_82"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> k_3_cast_fp16 = reshape(shape = var_82, x = k_1_cast_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
tensor<int32, [4]> var_84 = const()[name = tensor<string, []>("op_84"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> new_v_cache_0 = reshape(shape = var_84, x = v_1_cast_fp16)[name = tensor<string, []>("v_3_cast_fp16")];
tensor<int32, [4]> var_96_begin_0 = const()[name = tensor<string, []>("op_96_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_96_end_0 = const()[name = tensor<string, []>("op_96_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_96_end_mask_0 = const()[name = tensor<string, []>("op_96_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_96_cast_fp16 = slice_by_index(begin = var_96_begin_0, end = var_96_end_0, end_mask = var_96_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_96_cast_fp16")];
tensor<int32, [4]> var_102_begin_0 = const()[name = tensor<string, []>("op_102_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_102_end_0 = const()[name = tensor<string, []>("op_102_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_102_end_mask_0 = const()[name = tensor<string, []>("op_102_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_102_cast_fp16 = slice_by_index(begin = var_102_begin_0, end = var_102_end_0, end_mask = var_102_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_102_cast_fp16")];
tensor<fp16, []> const_3_promoted_to_fp16 = const()[name = tensor<string, []>("const_3_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_104_cast_fp16 = mul(x = var_102_cast_fp16, y = const_3_promoted_to_fp16)[name = tensor<string, []>("op_104_cast_fp16")];
tensor<bool, []> rotated_1_interleave_0 = const()[name = tensor<string, []>("rotated_1_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_1_cast_fp16 = concat(axis = var_23, interleave = rotated_1_interleave_0, values = (var_104_cast_fp16, var_96_cast_fp16))[name = tensor<string, []>("rotated_1_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_107_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = tensor<string, []>("op_107_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_108_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = tensor<string, []>("op_108_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_1_cast_fp16 = add(x = var_107_cast_fp16, y = var_108_cast_fp16)[name = tensor<string, []>("roped_1_cast_fp16")];
tensor<int32, [4]> var_121_begin_0 = const()[name = tensor<string, []>("op_121_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_121_end_0 = const()[name = tensor<string, []>("op_121_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_121_end_mask_0 = const()[name = tensor<string, []>("op_121_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_121_cast_fp16 = slice_by_index(begin = var_121_begin_0, end = var_121_end_0, end_mask = var_121_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")];
tensor<int32, [4]> var_127_begin_0 = const()[name = tensor<string, []>("op_127_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_127_end_0 = const()[name = tensor<string, []>("op_127_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_127_end_mask_0 = const()[name = tensor<string, []>("op_127_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_127_cast_fp16 = slice_by_index(begin = var_127_begin_0, end = var_127_end_0, end_mask = var_127_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_127_cast_fp16")];
tensor<fp16, []> const_5_promoted_to_fp16 = const()[name = tensor<string, []>("const_5_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_129_cast_fp16 = mul(x = var_127_cast_fp16, y = const_5_promoted_to_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
tensor<bool, []> rotated_3_interleave_0 = const()[name = tensor<string, []>("rotated_3_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_3_cast_fp16 = concat(axis = var_23, interleave = rotated_3_interleave_0, values = (var_129_cast_fp16, var_121_cast_fp16))[name = tensor<string, []>("rotated_3_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_132_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = tensor<string, []>("op_132_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_133_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = tensor<string, []>("op_133_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_3_cast_fp16 = add(x = var_132_cast_fp16, y = var_133_cast_fp16)[name = tensor<string, []>("roped_3_cast_fp16")];
tensor<bool, []> q_5_interleave_0 = const()[name = tensor<string, []>("q_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> q_5_cast_fp16 = concat(axis = var_23, interleave = q_5_interleave_0, values = roped_1_cast_fp16)[name = tensor<string, []>("q_5_cast_fp16")];
tensor<bool, []> k_5_interleave_0 = const()[name = tensor<string, []>("k_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> new_k_cache_0 = concat(axis = var_23, interleave = k_5_interleave_0, values = roped_3_cast_fp16)[name = tensor<string, []>("k_5_cast_fp16")];
tensor<bool, []> k_7_interleave_0 = const()[name = tensor<string, []>("k_7_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_25, interleave = k_7_interleave_0, values = (k_cache_0, new_k_cache_0))[name = tensor<string, []>("k_7_cast_fp16")];
tensor<bool, []> v_5_interleave_0 = const()[name = tensor<string, []>("v_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> v_5_cast_fp16 = concat(axis = var_25, interleave = v_5_interleave_0, values = (v_cache_0, new_v_cache_0))[name = tensor<string, []>("v_5_cast_fp16")];
tensor<fp16, []> var_155_to_fp16 = const()[name = tensor<string, []>("op_155_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 64]> var_156_cast_fp16 = mul(x = q_5_cast_fp16, y = var_155_to_fp16)[name = tensor<string, []>("op_156_cast_fp16")];
tensor<bool, []> attn_weights_1_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_0"), val = tensor<bool, []>(true)];
tensor<bool, []> attn_weights_1_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 64, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_156_cast_fp16, y = k_7_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> var_164_cast_fp16 = softmax(axis = var_18, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 32, 128, 64]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = v_5_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
tensor<int32, [4]> var_168 = const()[name = tensor<string, []>("op_168"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 4096, 1, 64]> input_1_cast_fp16 = reshape(shape = var_168, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
tensor<int32, [2]> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_174 = const()[name = tensor<string, []>("op_174"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_176_pad_type_0 = const()[name = tensor<string, []>("op_176_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_176_pad_0 = const()[name = tensor<string, []>("op_176_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_176_cast_fp16 = conv(dilations = var_174, groups = var_32, pad = var_176_pad_0, pad_type = var_176_pad_type_0, strides = var_172, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303599872)))];
tensor<fp16, [1, 4096, 1, 64]> attention_output_1_cast_fp16 = mul(x = var_176_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_1_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = tensor<string, []>("x_11_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> var_185_cast_fp16 = mul(x = x_11_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("op_185_cast_fp16")];
tensor<int32, [1]> var_186 = const()[name = tensor<string, []>("op_186"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_3_cast_fp16 = reduce_mean(axes = var_186, keep_dims = var_33, x = var_185_cast_fp16)[name = tensor<string, []>("norm_x_3_cast_fp16")];
tensor<fp16, []> var_188_to_fp16 = const()[name = tensor<string, []>("op_188_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_189_cast_fp16 = add(x = norm_x_3_cast_fp16, y = var_188_to_fp16)[name = tensor<string, []>("op_189_cast_fp16")];
tensor<fp16, []> var_190_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_190_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_190_cast_fp16 = rsqrt(epsilon = var_190_epsilon_0_to_fp16, x = var_189_cast_fp16)[name = tensor<string, []>("op_190_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_5_cast_fp16 = mul(x = x_11_cast_fp16, y = var_190_cast_fp16)[name = tensor<string, []>("x_normed_5_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303608128)))];
tensor<fp16, [1, 4096, 1, 64]> input_3_cast_fp16 = mul(x = x_normed_5_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
tensor<int32, [2]> var_202 = const()[name = tensor<string, []>("op_202"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_206_pad_type_0 = const()[name = tensor<string, []>("op_206_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_206_pad_0 = const()[name = tensor<string, []>("op_206_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 64]> var_206_cast_fp16 = conv(dilations = var_204, groups = var_32, pad = var_206_pad_0, pad_type = var_206_pad_type_0, strides = var_202, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_206_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303616384)))];
tensor<fp16, [1, 11008, 1, 64]> input_5_cast_fp16 = mul(x = var_206_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
tensor<int32, [2]> var_210 = const()[name = tensor<string, []>("op_210"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_214_pad_type_0 = const()[name = tensor<string, []>("op_214_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_214_pad_0 = const()[name = tensor<string, []>("op_214_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 64]> var_214_cast_fp16 = conv(dilations = var_212, groups = var_32, pad = var_214_pad_0, pad_type = var_214_pad_type_0, strides = var_210, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303638464)))];
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_1_cast_fp16 = mul(x = var_214_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_1_cast_fp16")];
tensor<fp16, [1, 11008, 1, 64]> var_216_cast_fp16 = silu(x = input_5_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
tensor<fp16, [1, 11008, 1, 64]> input_7_cast_fp16 = mul(x = var_216_cast_fp16, y = x_fc_2_1_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
tensor<int32, [2]> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_224_pad_type_0 = const()[name = tensor<string, []>("op_224_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_224_pad_0 = const()[name = tensor<string, []>("op_224_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_224_cast_fp16 = conv(dilations = var_222, groups = var_32, pad = var_224_pad_0, pad_type = var_224_pad_type_0, strides = var_220, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("op_224_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303660544)))];
tensor<fp16, [1, 4096, 1, 64]> var_225_cast_fp16 = mul(x = var_224_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_225_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_15_cast_fp16 = add(x = var_225_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("x_15_cast_fp16")];
tensor<int32, []> var_232 = const()[name = tensor<string, []>("op_232"), val = tensor<int32, []>(3)];
tensor<int32, []> var_237 = const()[name = tensor<string, []>("op_237"), val = tensor<int32, []>(-2)];
tensor<int32, []> var_239 = const()[name = tensor<string, []>("op_239"), val = tensor<int32, []>(-1)];
tensor<int32, []> var_246 = const()[name = tensor<string, []>("op_246"), val = tensor<int32, []>(1)];
tensor<bool, []> var_247 = const()[name = tensor<string, []>("op_247"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 4096, 1, 64]> var_254_cast_fp16 = mul(x = x_15_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("op_254_cast_fp16")];
tensor<int32, [1]> var_255 = const()[name = tensor<string, []>("op_255"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_5_cast_fp16 = reduce_mean(axes = var_255, keep_dims = var_247, x = var_254_cast_fp16)[name = tensor<string, []>("norm_x_5_cast_fp16")];
tensor<fp16, []> var_257_to_fp16 = const()[name = tensor<string, []>("op_257_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_258_cast_fp16 = add(x = norm_x_5_cast_fp16, y = var_257_to_fp16)[name = tensor<string, []>("op_258_cast_fp16")];
tensor<fp16, []> var_259_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_259_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_259_cast_fp16 = rsqrt(epsilon = var_259_epsilon_0_to_fp16, x = var_258_cast_fp16)[name = tensor<string, []>("op_259_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_9_cast_fp16 = mul(x = x_15_cast_fp16, y = var_259_cast_fp16)[name = tensor<string, []>("x_normed_9_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303668800)))];
tensor<fp16, [1, 4096, 1, 64]> x_19_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
tensor<int32, [2]> var_274 = const()[name = tensor<string, []>("op_274"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_276 = const()[name = tensor<string, []>("op_276"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_278_pad_type_0 = const()[name = tensor<string, []>("op_278_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_278_pad_0 = const()[name = tensor<string, []>("op_278_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_278_cast_fp16 = conv(dilations = var_276, groups = var_246, pad = var_278_pad_0, pad_type = var_278_pad_type_0, strides = var_274, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_278_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303677056)))];
tensor<fp16, [1, 4096, 1, 64]> q_7_cast_fp16 = mul(x = var_278_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
tensor<int32, [2]> var_282 = const()[name = tensor<string, []>("op_282"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_286_pad_type_0 = const()[name = tensor<string, []>("op_286_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_286_pad_0 = const()[name = tensor<string, []>("op_286_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_286_cast_fp16 = conv(dilations = var_284, groups = var_246, pad = var_286_pad_0, pad_type = var_286_pad_type_0, strides = var_282, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_286_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303685312)))];
tensor<fp16, [1, 4096, 1, 64]> k_9_cast_fp16 = mul(x = var_286_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_9_cast_fp16")];
tensor<int32, [2]> var_290 = const()[name = tensor<string, []>("op_290"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_294_pad_type_0 = const()[name = tensor<string, []>("op_294_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_294_pad_0 = const()[name = tensor<string, []>("op_294_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_294_cast_fp16 = conv(dilations = var_292, groups = var_246, pad = var_294_pad_0, pad_type = var_294_pad_type_0, strides = var_290, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_294_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303693568)))];
tensor<fp16, [1, 4096, 1, 64]> v_7_cast_fp16 = mul(x = var_294_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_7_cast_fp16")];
tensor<int32, [4]> var_296 = const()[name = tensor<string, []>("op_296"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> q_9_cast_fp16 = reshape(shape = var_296, x = q_7_cast_fp16)[name = tensor<string, []>("q_9_cast_fp16")];
tensor<int32, [4]> var_298 = const()[name = tensor<string, []>("op_298"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> k_11_cast_fp16 = reshape(shape = var_298, x = k_9_cast_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
tensor<int32, [4]> var_300 = const()[name = tensor<string, []>("op_300"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> new_v_cache_1 = reshape(shape = var_300, x = v_7_cast_fp16)[name = tensor<string, []>("v_9_cast_fp16")];
tensor<int32, [4]> var_312_begin_0 = const()[name = tensor<string, []>("op_312_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_312_end_0 = const()[name = tensor<string, []>("op_312_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_312_end_mask_0 = const()[name = tensor<string, []>("op_312_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_312_cast_fp16 = slice_by_index(begin = var_312_begin_0, end = var_312_end_0, end_mask = var_312_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_312_cast_fp16")];
tensor<int32, [4]> var_318_begin_0 = const()[name = tensor<string, []>("op_318_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_318_end_0 = const()[name = tensor<string, []>("op_318_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_318_end_mask_0 = const()[name = tensor<string, []>("op_318_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_318_cast_fp16 = slice_by_index(begin = var_318_begin_0, end = var_318_end_0, end_mask = var_318_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_318_cast_fp16")];
tensor<fp16, []> const_10_promoted_to_fp16 = const()[name = tensor<string, []>("const_10_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_320_cast_fp16 = mul(x = var_318_cast_fp16, y = const_10_promoted_to_fp16)[name = tensor<string, []>("op_320_cast_fp16")];
tensor<bool, []> rotated_5_interleave_0 = const()[name = tensor<string, []>("rotated_5_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_5_cast_fp16 = concat(axis = var_237, interleave = rotated_5_interleave_0, values = (var_320_cast_fp16, var_312_cast_fp16))[name = tensor<string, []>("rotated_5_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_323_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = tensor<string, []>("op_323_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_324_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = tensor<string, []>("op_324_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_5_cast_fp16 = add(x = var_323_cast_fp16, y = var_324_cast_fp16)[name = tensor<string, []>("roped_5_cast_fp16")];
tensor<int32, [4]> var_337_begin_0 = const()[name = tensor<string, []>("op_337_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_337_end_0 = const()[name = tensor<string, []>("op_337_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_337_end_mask_0 = const()[name = tensor<string, []>("op_337_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_337_cast_fp16 = slice_by_index(begin = var_337_begin_0, end = var_337_end_0, end_mask = var_337_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_337_cast_fp16")];
tensor<int32, [4]> var_343_begin_0 = const()[name = tensor<string, []>("op_343_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_343_end_0 = const()[name = tensor<string, []>("op_343_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_343_end_mask_0 = const()[name = tensor<string, []>("op_343_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_343_cast_fp16 = slice_by_index(begin = var_343_begin_0, end = var_343_end_0, end_mask = var_343_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_343_cast_fp16")];
tensor<fp16, []> const_12_promoted_to_fp16 = const()[name = tensor<string, []>("const_12_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_345_cast_fp16 = mul(x = var_343_cast_fp16, y = const_12_promoted_to_fp16)[name = tensor<string, []>("op_345_cast_fp16")];
tensor<bool, []> rotated_7_interleave_0 = const()[name = tensor<string, []>("rotated_7_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_7_cast_fp16 = concat(axis = var_237, interleave = rotated_7_interleave_0, values = (var_345_cast_fp16, var_337_cast_fp16))[name = tensor<string, []>("rotated_7_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_348_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = tensor<string, []>("op_348_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_349_cast_fp16 = mul(x = rotated_7_cast_fp16, y = sin)[name = tensor<string, []>("op_349_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_7_cast_fp16 = add(x = var_348_cast_fp16, y = var_349_cast_fp16)[name = tensor<string, []>("roped_7_cast_fp16")];
tensor<bool, []> q_11_interleave_0 = const()[name = tensor<string, []>("q_11_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> q_11_cast_fp16 = concat(axis = var_237, interleave = q_11_interleave_0, values = roped_5_cast_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
tensor<bool, []> k_13_interleave_0 = const()[name = tensor<string, []>("k_13_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> new_k_cache_1 = concat(axis = var_237, interleave = k_13_interleave_0, values = roped_7_cast_fp16)[name = tensor<string, []>("k_13_cast_fp16")];
tensor<bool, []> k_15_interleave_0 = const()[name = tensor<string, []>("k_15_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> k_15_cast_fp16 = concat(axis = var_239, interleave = k_15_interleave_0, values = (k_cache_1, new_k_cache_1))[name = tensor<string, []>("k_15_cast_fp16")];
tensor<bool, []> v_11_interleave_0 = const()[name = tensor<string, []>("v_11_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> v_11_cast_fp16 = concat(axis = var_239, interleave = v_11_interleave_0, values = (v_cache_1, new_v_cache_1))[name = tensor<string, []>("v_11_cast_fp16")];
tensor<fp16, []> var_371_to_fp16 = const()[name = tensor<string, []>("op_371_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 64]> var_372_cast_fp16 = mul(x = q_11_cast_fp16, y = var_371_to_fp16)[name = tensor<string, []>("op_372_cast_fp16")];
tensor<bool, []> attn_weights_5_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_x_0"), val = tensor<bool, []>(true)];
tensor<bool, []> attn_weights_5_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_y_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 64, 512]> attn_weights_5_cast_fp16 = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_372_cast_fp16, y = k_15_cast_fp16)[name = tensor<string, []>("attn_weights_5_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> attn_weights_7_cast_fp16 = add(x = attn_weights_5_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_7_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> var_380_cast_fp16 = softmax(axis = var_232, x = attn_weights_7_cast_fp16)[name = tensor<string, []>("op_380_cast_fp16")];
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 32, 128, 64]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = v_11_cast_fp16, y = var_380_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
tensor<int32, [4]> var_384 = const()[name = tensor<string, []>("op_384"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 4096, 1, 64]> input_9_cast_fp16 = reshape(shape = var_384, x = attn_3_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
tensor<int32, [2]> var_388 = const()[name = tensor<string, []>("op_388"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_390 = const()[name = tensor<string, []>("op_390"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_392_pad_type_0 = const()[name = tensor<string, []>("op_392_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_392_pad_0 = const()[name = tensor<string, []>("op_392_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_392_cast_fp16 = conv(dilations = var_390, groups = var_246, pad = var_392_pad_0, pad_type = var_392_pad_type_0, strides = var_388, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("op_392_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303701824)))];
tensor<fp16, [1, 4096, 1, 64]> attention_output_3_cast_fp16 = mul(x = var_392_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_3_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_25_cast_fp16 = add(x = attention_output_3_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> var_401_cast_fp16 = mul(x = x_25_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_401_cast_fp16")];
tensor<int32, [1]> var_402 = const()[name = tensor<string, []>("op_402"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_7_cast_fp16 = reduce_mean(axes = var_402, keep_dims = var_247, x = var_401_cast_fp16)[name = tensor<string, []>("norm_x_7_cast_fp16")];
tensor<fp16, []> var_404_to_fp16 = const()[name = tensor<string, []>("op_404_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_405_cast_fp16 = add(x = norm_x_7_cast_fp16, y = var_404_to_fp16)[name = tensor<string, []>("op_405_cast_fp16")];
tensor<fp16, []> var_406_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_406_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_406_cast_fp16 = rsqrt(epsilon = var_406_epsilon_0_to_fp16, x = var_405_cast_fp16)[name = tensor<string, []>("op_406_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_13_cast_fp16 = mul(x = x_25_cast_fp16, y = var_406_cast_fp16)[name = tensor<string, []>("x_normed_13_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303710080)))];
tensor<fp16, [1, 4096, 1, 64]> input_11_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
tensor<int32, [2]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_420 = const()[name = tensor<string, []>("op_420"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_422_pad_type_0 = const()[name = tensor<string, []>("op_422_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_422_pad_0 = const()[name = tensor<string, []>("op_422_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 64]> var_422_cast_fp16 = conv(dilations = var_420, groups = var_246, pad = var_422_pad_0, pad_type = var_422_pad_type_0, strides = var_418, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_422_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303718336)))];
tensor<fp16, [1, 11008, 1, 64]> input_13_cast_fp16 = mul(x = var_422_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
tensor<int32, [2]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_428 = const()[name = tensor<string, []>("op_428"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_430_pad_type_0 = const()[name = tensor<string, []>("op_430_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_430_pad_0 = const()[name = tensor<string, []>("op_430_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 11008, 1, 64]> var_430_cast_fp16 = conv(dilations = var_428, groups = var_246, pad = var_430_pad_0, pad_type = var_430_pad_type_0, strides = var_426, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_430_cast_fp16")];
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303740416)))];
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_3_cast_fp16 = mul(x = var_430_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_3_cast_fp16")];
tensor<fp16, [1, 11008, 1, 64]> var_432_cast_fp16 = silu(x = input_13_cast_fp16)[name = tensor<string, []>("op_432_cast_fp16")];
tensor<fp16, [1, 11008, 1, 64]> input_15_cast_fp16 = mul(x = var_432_cast_fp16, y = x_fc_2_3_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
tensor<int32, [2]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_438 = const()[name = tensor<string, []>("op_438"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_440_pad_type_0 = const()[name = tensor<string, []>("op_440_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_440_pad_0 = const()[name = tensor<string, []>("op_440_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_440_cast_fp16 = conv(dilations = var_438, groups = var_246, pad = var_440_pad_0, pad_type = var_440_pad_type_0, strides = var_436, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("op_440_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303762496)))];
tensor<fp16, [1, 4096, 1, 64]> var_441_cast_fp16 = mul(x = var_440_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_29_cast_fp16 = add(x = var_441_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
tensor<int32, []> var_448 = const()[name = tensor<string, []>("op_448"), val = tensor<int32, []>(3)];
tensor<int32, []> var_453 = const()[name = tensor<string, []>("op_453"), val = tensor<int32, []>(-2)];
tensor<int32, []> var_455 = const()[name = tensor<string, []>("op_455"), val = tensor<int32, []>(-1)];
tensor<int32, []> var_462 = const()[name = tensor<string, []>("op_462"), val = tensor<int32, []>(1)];
tensor<bool, []> var_463 = const()[name = tensor<string, []>("op_463"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 4096, 1, 64]> var_470_cast_fp16 = mul(x = x_29_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("op_470_cast_fp16")];
tensor<int32, [1]> var_471 = const()[name = tensor<string, []>("op_471"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_9_cast_fp16 = reduce_mean(axes = var_471, keep_dims = var_463, x = var_470_cast_fp16)[name = tensor<string, []>("norm_x_9_cast_fp16")];
tensor<fp16, []> var_473_to_fp16 = const()[name = tensor<string, []>("op_473_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
tensor<fp16, [1, 1, 1, 64]> var_474_cast_fp16 = add(x = norm_x_9_cast_fp16, y = var_473_to_fp16)[name = tensor<string, []>("op_474_cast_fp16")];
tensor<fp16, []> var_475_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_475_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
tensor<fp16, [1, 1, 1, 64]> var_475_cast_fp16 = rsqrt(epsilon = var_475_epsilon_0_to_fp16, x = var_474_cast_fp16)[name = tensor<string, []>("op_475_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_normed_17_cast_fp16 = mul(x = x_29_cast_fp16, y = var_475_cast_fp16)[name = tensor<string, []>("x_normed_17_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303770752)))];
tensor<fp16, [1, 4096, 1, 64]> x_33_cast_fp16 = mul(x = x_normed_17_cast_fp16, y = blocks_2_norm_1_weight_to_fp16)[name = tensor<string, []>("x_33_cast_fp16")];
tensor<int32, [2]> var_490 = const()[name = tensor<string, []>("op_490"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_492 = const()[name = tensor<string, []>("op_492"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_494_pad_type_0 = const()[name = tensor<string, []>("op_494_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_494_pad_0 = const()[name = tensor<string, []>("op_494_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_494_cast_fp16 = conv(dilations = var_492, groups = var_462, pad = var_494_pad_0, pad_type = var_494_pad_type_0, strides = var_490, weight = blocks_2_attn_q_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_494_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303779008)))];
tensor<fp16, [1, 4096, 1, 64]> q_13_cast_fp16 = mul(x = var_494_cast_fp16, y = blocks_2_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_13_cast_fp16")];
tensor<int32, [2]> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_500 = const()[name = tensor<string, []>("op_500"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_502_pad_type_0 = const()[name = tensor<string, []>("op_502_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_502_pad_0 = const()[name = tensor<string, []>("op_502_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_502_cast_fp16 = conv(dilations = var_500, groups = var_462, pad = var_502_pad_0, pad_type = var_502_pad_type_0, strides = var_498, weight = blocks_2_attn_k_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_502_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303787264)))];
tensor<fp16, [1, 4096, 1, 64]> k_17_cast_fp16 = mul(x = var_502_cast_fp16, y = blocks_2_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_17_cast_fp16")];
tensor<int32, [2]> var_506 = const()[name = tensor<string, []>("op_506"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_508 = const()[name = tensor<string, []>("op_508"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_510_pad_type_0 = const()[name = tensor<string, []>("op_510_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_510_pad_0 = const()[name = tensor<string, []>("op_510_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_510_cast_fp16 = conv(dilations = var_508, groups = var_462, pad = var_510_pad_0, pad_type = var_510_pad_type_0, strides = var_506, weight = blocks_2_attn_v_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_510_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303795520)))];
tensor<fp16, [1, 4096, 1, 64]> v_13_cast_fp16 = mul(x = var_510_cast_fp16, y = blocks_2_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_13_cast_fp16")];
tensor<int32, [4]> var_512 = const()[name = tensor<string, []>("op_512"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> q_15_cast_fp16 = reshape(shape = var_512, x = q_13_cast_fp16)[name = tensor<string, []>("q_15_cast_fp16")];
tensor<int32, [4]> var_514 = const()[name = tensor<string, []>("op_514"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> k_19_cast_fp16 = reshape(shape = var_514, x = k_17_cast_fp16)[name = tensor<string, []>("k_19_cast_fp16")];
tensor<int32, [4]> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<fp16, [1, 32, 128, 64]> new_v_cache_2 = reshape(shape = var_516, x = v_13_cast_fp16)[name = tensor<string, []>("v_15_cast_fp16")];
tensor<int32, [4]> var_528_begin_0 = const()[name = tensor<string, []>("op_528_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_528_end_0 = const()[name = tensor<string, []>("op_528_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_528_end_mask_0 = const()[name = tensor<string, []>("op_528_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_528_cast_fp16 = slice_by_index(begin = var_528_begin_0, end = var_528_end_0, end_mask = var_528_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_528_cast_fp16")];
tensor<int32, [4]> var_534_begin_0 = const()[name = tensor<string, []>("op_534_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_534_end_0 = const()[name = tensor<string, []>("op_534_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_534_end_mask_0 = const()[name = tensor<string, []>("op_534_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_534_cast_fp16 = slice_by_index(begin = var_534_begin_0, end = var_534_end_0, end_mask = var_534_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_534_cast_fp16")];
tensor<fp16, []> const_17_promoted_to_fp16 = const()[name = tensor<string, []>("const_17_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_536_cast_fp16 = mul(x = var_534_cast_fp16, y = const_17_promoted_to_fp16)[name = tensor<string, []>("op_536_cast_fp16")];
tensor<bool, []> rotated_9_interleave_0 = const()[name = tensor<string, []>("rotated_9_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_9_cast_fp16 = concat(axis = var_453, interleave = rotated_9_interleave_0, values = (var_536_cast_fp16, var_528_cast_fp16))[name = tensor<string, []>("rotated_9_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_539_cast_fp16 = mul(x = q_15_cast_fp16, y = cos)[name = tensor<string, []>("op_539_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_540_cast_fp16 = mul(x = rotated_9_cast_fp16, y = sin)[name = tensor<string, []>("op_540_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_9_cast_fp16 = add(x = var_539_cast_fp16, y = var_540_cast_fp16)[name = tensor<string, []>("roped_9_cast_fp16")];
tensor<int32, [4]> var_553_begin_0 = const()[name = tensor<string, []>("op_553_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_553_end_0 = const()[name = tensor<string, []>("op_553_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
tensor<bool, [4]> var_553_end_mask_0 = const()[name = tensor<string, []>("op_553_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<fp16, [1, 32, 64, 64]> var_553_cast_fp16 = slice_by_index(begin = var_553_begin_0, end = var_553_end_0, end_mask = var_553_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_553_cast_fp16")];
tensor<int32, [4]> var_559_begin_0 = const()[name = tensor<string, []>("op_559_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
tensor<int32, [4]> var_559_end_0 = const()[name = tensor<string, []>("op_559_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
tensor<bool, [4]> var_559_end_mask_0 = const()[name = tensor<string, []>("op_559_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
tensor<fp16, [1, 32, 64, 64]> var_559_cast_fp16 = slice_by_index(begin = var_559_begin_0, end = var_559_end_0, end_mask = var_559_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_559_cast_fp16")];
tensor<fp16, []> const_19_promoted_to_fp16 = const()[name = tensor<string, []>("const_19_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
tensor<fp16, [1, 32, 64, 64]> var_561_cast_fp16 = mul(x = var_559_cast_fp16, y = const_19_promoted_to_fp16)[name = tensor<string, []>("op_561_cast_fp16")];
tensor<bool, []> rotated_interleave_0 = const()[name = tensor<string, []>("rotated_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> rotated_cast_fp16 = concat(axis = var_453, interleave = rotated_interleave_0, values = (var_561_cast_fp16, var_553_cast_fp16))[name = tensor<string, []>("rotated_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_564_cast_fp16 = mul(x = k_19_cast_fp16, y = cos)[name = tensor<string, []>("op_564_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> var_565_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = tensor<string, []>("op_565_cast_fp16")];
tensor<fp16, [1, 32, 128, 64]> roped_cast_fp16 = add(x = var_564_cast_fp16, y = var_565_cast_fp16)[name = tensor<string, []>("roped_cast_fp16")];
tensor<bool, []> q_interleave_0 = const()[name = tensor<string, []>("q_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> q_cast_fp16 = concat(axis = var_453, interleave = q_interleave_0, values = roped_9_cast_fp16)[name = tensor<string, []>("q_cast_fp16")];
tensor<bool, []> k_21_interleave_0 = const()[name = tensor<string, []>("k_21_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 64]> new_k_cache_2 = concat(axis = var_453, interleave = k_21_interleave_0, values = roped_cast_fp16)[name = tensor<string, []>("k_21_cast_fp16")];
tensor<bool, []> k_interleave_0 = const()[name = tensor<string, []>("k_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_455, interleave = k_interleave_0, values = (k_cache_2, new_k_cache_2))[name = tensor<string, []>("k_cast_fp16")];
tensor<bool, []> v_interleave_0 = const()[name = tensor<string, []>("v_interleave_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 128, 512]> v_cast_fp16 = concat(axis = var_455, interleave = v_interleave_0, values = (v_cache_2, new_v_cache_2))[name = tensor<string, []>("v_cast_fp16")];
tensor<fp16, []> var_587_to_fp16 = const()[name = tensor<string, []>("op_587_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
tensor<fp16, [1, 32, 128, 64]> var_588_cast_fp16 = mul(x = q_cast_fp16, y = var_587_to_fp16)[name = tensor<string, []>("op_588_cast_fp16")];
tensor<bool, []> attn_weights_9_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_x_0"), val = tensor<bool, []>(true)];
tensor<bool, []> attn_weights_9_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_y_0"), val = tensor<bool, []>(false)];
tensor<fp16, [1, 32, 64, 512]> attn_weights_9_cast_fp16 = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_588_cast_fp16, y = k_cast_fp16)[name = tensor<string, []>("attn_weights_9_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> attn_weights_cast_fp16 = add(x = attn_weights_9_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_cast_fp16")];
tensor<fp16, [1, 32, 64, 512]> var_596_cast_fp16 = softmax(axis = var_448, x = attn_weights_cast_fp16)[name = tensor<string, []>("op_596_cast_fp16")];
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
tensor<fp16, [1, 32, 128, 64]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = v_cast_fp16, y = var_596_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
tensor<int32, [4]> var_600 = const()[name = tensor<string, []>("op_600"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
tensor<fp16, [1, 4096, 1, 64]> input_17_cast_fp16 = reshape(shape = var_600, x = attn_5_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
tensor<int32, [2]> var_604 = const()[name = tensor<string, []>("op_604"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [2]> var_606 = const()[name = tensor<string, []>("op_606"), val = tensor<int32, [2]>([1, 1])];
tensor<string, []> var_608_pad_type_0 = const()[name = tensor<string, []>("op_608_pad_type_0"), val = tensor<string, []>("custom")];
tensor<int32, [4]> var_608_pad_0 = const()[name = tensor<string, []>("op_608_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<fp16, [1, 4096, 1, 64]> var_608_cast_fp16 = conv(dilations = var_606, groups = var_462, pad = var_608_pad_0, pad_type = var_608_pad_type_0, strides = var_604, weight = blocks_2_attn_proj_weight_palettized_cast_fp16, x = input_17_cast_fp16)[name = tensor<string, []>("op_608_cast_fp16")];
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303803776)))];
tensor<fp16, [1, 4096, 1, 64]> attention_output_cast_fp16 = mul(x = var_608_cast_fp16, y = blocks_2_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> x_39_cast_fp16 = add(x = attention_output_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("x_39_cast_fp16")];
tensor<fp16, [1, 4096, 1, 64]> var_617_cast_fp16 = mul(x = x_39_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")];
tensor<int32, [1]> var_618 = const()[name = tensor<string, []>("op_618"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_618, keep_dims = var_463, x = var_617_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
397 |
+
tensor<fp16, []> var_620_to_fp16 = const()[name = tensor<string, []>("op_620_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
398 |
+
tensor<fp16, [1, 1, 1, 64]> var_621_cast_fp16 = add(x = norm_x_cast_fp16, y = var_620_to_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
399 |
+
tensor<fp16, []> var_622_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_622_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
400 |
+
tensor<fp16, [1, 1, 1, 64]> var_622_cast_fp16 = rsqrt(epsilon = var_622_epsilon_0_to_fp16, x = var_621_cast_fp16)[name = tensor<string, []>("op_622_cast_fp16")];
|
401 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_21_cast_fp16 = mul(x = x_39_cast_fp16, y = var_622_cast_fp16)[name = tensor<string, []>("x_normed_21_cast_fp16")];
|
402 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303812032)))];
|
403 |
+
tensor<fp16, [1, 4096, 1, 64]> input_19_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_2_norm_2_weight_to_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
404 |
+
tensor<int32, [2]> var_634 = const()[name = tensor<string, []>("op_634"), val = tensor<int32, [2]>([1, 1])];
|
405 |
+
tensor<int32, [2]> var_636 = const()[name = tensor<string, []>("op_636"), val = tensor<int32, [2]>([1, 1])];
|
406 |
+
tensor<string, []> var_638_pad_type_0 = const()[name = tensor<string, []>("op_638_pad_type_0"), val = tensor<string, []>("custom")];
|
407 |
+
tensor<int32, [4]> var_638_pad_0 = const()[name = tensor<string, []>("op_638_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
408 |
+
tensor<fp16, [1, 11008, 1, 64]> var_638_cast_fp16 = conv(dilations = var_636, groups = var_462, pad = var_638_pad_0, pad_type = var_638_pad_type_0, strides = var_634, weight = blocks_2_mlp_fc_1_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303820288)))];
|
410 |
+
tensor<fp16, [1, 11008, 1, 64]> input_21_cast_fp16 = mul(x = var_638_cast_fp16, y = blocks_2_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
|
411 |
+
tensor<int32, [2]> var_642 = const()[name = tensor<string, []>("op_642"), val = tensor<int32, [2]>([1, 1])];
|
412 |
+
tensor<int32, [2]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [2]>([1, 1])];
|
413 |
+
tensor<string, []> var_646_pad_type_0 = const()[name = tensor<string, []>("op_646_pad_type_0"), val = tensor<string, []>("custom")];
|
414 |
+
tensor<int32, [4]> var_646_pad_0 = const()[name = tensor<string, []>("op_646_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
415 |
+
tensor<fp16, [1, 11008, 1, 64]> var_646_cast_fp16 = conv(dilations = var_644, groups = var_462, pad = var_646_pad_0, pad_type = var_646_pad_type_0, strides = var_642, weight = blocks_2_mlp_fc_2_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_646_cast_fp16")];
|
416 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303842368)))];
|
417 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_cast_fp16 = mul(x = var_646_cast_fp16, y = blocks_2_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 11008, 1, 64]> var_648_cast_fp16 = silu(x = input_21_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
419 |
+
tensor<fp16, [1, 11008, 1, 64]> input_cast_fp16 = mul(x = var_648_cast_fp16, y = x_fc_2_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
420 |
+
tensor<int32, [2]> var_652 = const()[name = tensor<string, []>("op_652"), val = tensor<int32, [2]>([1, 1])];
|
421 |
+
tensor<int32, [2]> var_654 = const()[name = tensor<string, []>("op_654"), val = tensor<int32, [2]>([1, 1])];
|
422 |
+
tensor<string, []> var_656_pad_type_0 = const()[name = tensor<string, []>("op_656_pad_type_0"), val = tensor<string, []>("custom")];
|
423 |
+
tensor<int32, [4]> var_656_pad_0 = const()[name = tensor<string, []>("op_656_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
424 |
+
tensor<fp16, [1, 4096, 1, 64]> var_656_cast_fp16 = conv(dilations = var_654, groups = var_462, pad = var_656_pad_0, pad_type = var_656_pad_type_0, strides = var_652, weight = blocks_2_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")];
|
425 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303864448)))];
|
426 |
+
tensor<fp16, [1, 4096, 1, 64]> var_657_cast_fp16 = mul(x = var_656_cast_fp16, y = blocks_2_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_657_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 4096, 1, 64]> new_x = add(x = var_657_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
428 |
+
} -> (new_x, new_k_cache_0, new_k_cache_1, new_k_cache_2, new_v_cache_0, new_v_cache_1, new_v_cache_2);
|
429 |
+
}
|
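The MIL program ending above (like each transformer chunk in this upload) spells out the standard Llama decoder-block math as individual ops: RMSNorm, 1x1-conv projections over 4-bit palettized weights with per-channel output scales, rotate-half RoPE, attention against a concatenated K/V cache with an additive mask, and a SiLU-gated MLP. As a rough reference only, here is a minimal NumPy sketch of what one such block computes. The plain [seq, hidden] layout, the weight-dict names, and the folding of the palettization output scales into the weight matrices are illustrative assumptions, not the repository's code; the compiled model instead keeps activations as [1, 4096, 1, seq] tensors.

import numpy as np

def rms_norm(x, weight, eps=1e-5):
    # mean(x^2) + eps, rsqrt, scale -- mirrors the reduce_mean/add/rsqrt/mul ops above
    # (eps ~ the 0x1.5p-17 fp16 constant)
    return x / np.sqrt(np.mean(x * x, axis=-1, keepdims=True) + eps) * weight

def rotate_half(x):
    # concat(-second_half, first_half) over the head dimension, as in the slice/mul/concat ops
    half = x.shape[-1] // 2
    return np.concatenate([-x[..., half:], x[..., :half]], axis=-1)

def rope(x, cos, sin):
    # cos, sin: (seq, head_dim) in this layout
    return x * cos + rotate_half(x) * sin

def decoder_block(x, w, cos, sin, mask, k_cache, v_cache, n_heads=32, head_dim=128):
    seq, hidden = x.shape                                  # e.g. (64, 4096)
    h = rms_norm(x, w["norm_1"])
    split = lambda t: t.reshape(seq, n_heads, head_dim).transpose(1, 0, 2)
    q = rope(split(h @ w["q_proj"].T), cos, sin)
    k = rope(split(h @ w["k_proj"].T), cos, sin)
    v = split(h @ w["v_proj"].T)
    k_all = np.concatenate([k_cache, k], axis=1)           # cached keys first, new keys last
    v_all = np.concatenate([v_cache, v], axis=1)
    # scaled dot-product attention; mask is additive, shape (seq, cached + seq)
    scores = q @ k_all.transpose(0, 2, 1) / np.sqrt(head_dim) + mask
    probs = np.exp(scores - scores.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    attn = (probs @ v_all).transpose(1, 0, 2).reshape(seq, hidden)
    x = x + attn @ w["attn_proj"].T
    h = rms_norm(x, w["norm_2"])
    gate, up = h @ w["fc_1"].T, h @ w["fc_2"].T
    x = x + ((gate / (1.0 + np.exp(-gate))) * up) @ w["mlp_proj"].T   # SiLU-gated MLP
    return x, k, v                                         # k, v become the block's new_*_cache outputs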
Llama-2-7b-hf_chunk5.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d931534284a44e5004b85274be8d122ee55af90a599ea689a9491c6ce13fa16
+size 303872704
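Three-line files like the one above are Git LFS pointers: the repository records only the blob's SHA-256 (the oid) and its byte size, and `git lfs pull` fetches the real binary. A small verification sketch, assuming the pointer has already been replaced by the downloaded weight file (path, oid, and size taken from the pointer above):

import hashlib
from pathlib import Path

path = Path("Llama-2-7b-hf_chunk5.mlmodelc/weights/weight.bin")
expected_oid = "5d931534284a44e5004b85274be8d122ee55af90a599ea689a9491c6ce13fa16"
expected_size = 303872704

assert path.stat().st_size == expected_size, "size mismatch"
digest = hashlib.sha256()
with path.open("rb") as f:
    for block in iter(lambda: f.read(1 << 20), b""):   # hash in 1 MiB pieces
        digest.update(block)
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("weight.bin matches its LFS pointer")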
Llama-2-7b-hf_chunk6.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3412284b024b899a736cd77112d4b1a4a5faa19d954259e925ef429f58bd886b
+size 243
Llama-2-7b-hf_chunk6.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:589729b2995d8ca8246bbb5d92b910207bab816ad67282b0a285bcd2de77f80e
+size 791
Llama-2-7b-hf_chunk6.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,218 @@
1 |
+
[
|
2 |
+
{
|
3 |
+
"metadataOutputVersion" : "3.0",
|
4 |
+
"storagePrecision" : "Mixed (Float16, Palettized (4 bits))",
|
5 |
+
"outputSchema" : [
|
6 |
+
{
|
7 |
+
"hasShapeFlexibility" : "0",
|
8 |
+
"isOptional" : "0",
|
9 |
+
"dataType" : "Float16",
|
10 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
11 |
+
"shortDescription" : "",
|
12 |
+
"shape" : "[1, 4096, 1, 64]",
|
13 |
+
"name" : "new_x",
|
14 |
+
"type" : "MultiArray"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"hasShapeFlexibility" : "0",
|
18 |
+
"isOptional" : "0",
|
19 |
+
"dataType" : "Float16",
|
20 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
21 |
+
"shortDescription" : "",
|
22 |
+
"shape" : "[1, 32, 128, 64]",
|
23 |
+
"name" : "new_k_cache_0",
|
24 |
+
"type" : "MultiArray"
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"hasShapeFlexibility" : "0",
|
28 |
+
"isOptional" : "0",
|
29 |
+
"dataType" : "Float16",
|
30 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
31 |
+
"shortDescription" : "",
|
32 |
+
"shape" : "[1, 32, 128, 64]",
|
33 |
+
"name" : "new_k_cache_1",
|
34 |
+
"type" : "MultiArray"
|
35 |
+
},
|
36 |
+
{
|
37 |
+
"hasShapeFlexibility" : "0",
|
38 |
+
"isOptional" : "0",
|
39 |
+
"dataType" : "Float16",
|
40 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
41 |
+
"shortDescription" : "",
|
42 |
+
"shape" : "[1, 32, 128, 64]",
|
43 |
+
"name" : "new_k_cache_2",
|
44 |
+
"type" : "MultiArray"
|
45 |
+
},
|
46 |
+
{
|
47 |
+
"hasShapeFlexibility" : "0",
|
48 |
+
"isOptional" : "0",
|
49 |
+
"dataType" : "Float16",
|
50 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
51 |
+
"shortDescription" : "",
|
52 |
+
"shape" : "[1, 32, 128, 64]",
|
53 |
+
"name" : "new_v_cache_0",
|
54 |
+
"type" : "MultiArray"
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"hasShapeFlexibility" : "0",
|
58 |
+
"isOptional" : "0",
|
59 |
+
"dataType" : "Float16",
|
60 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
61 |
+
"shortDescription" : "",
|
62 |
+
"shape" : "[1, 32, 128, 64]",
|
63 |
+
"name" : "new_v_cache_1",
|
64 |
+
"type" : "MultiArray"
|
65 |
+
},
|
66 |
+
{
|
67 |
+
"hasShapeFlexibility" : "0",
|
68 |
+
"isOptional" : "0",
|
69 |
+
"dataType" : "Float16",
|
70 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 64)",
|
71 |
+
"shortDescription" : "",
|
72 |
+
"shape" : "[1, 32, 128, 64]",
|
73 |
+
"name" : "new_v_cache_2",
|
74 |
+
"type" : "MultiArray"
|
75 |
+
}
|
76 |
+
],
|
77 |
+
"modelParameters" : [
|
78 |
+
|
79 |
+
],
|
80 |
+
"specificationVersion" : 7,
|
81 |
+
"mlProgramOperationTypeHistogram" : {
|
82 |
+
"Concat" : 18,
|
83 |
+
"Ios16.rsqrt" : 6,
|
84 |
+
"Ios16.mul" : 63,
|
85 |
+
"SliceByIndex" : 12,
|
86 |
+
"Ios16.constexprLutToDense" : 21,
|
87 |
+
"Ios16.conv" : 21,
|
88 |
+
"Ios16.add" : 21,
|
89 |
+
"Ios16.reduceMean" : 6,
|
90 |
+
"Ios16.matmul" : 6,
|
91 |
+
"Ios16.softmax" : 3,
|
92 |
+
"Ios16.reshape" : 12,
|
93 |
+
"Ios16.silu" : 3
|
94 |
+
},
|
95 |
+
"computePrecision" : "Mixed (Float16, Int32)",
|
96 |
+
"isUpdatable" : "0",
|
97 |
+
"availability" : {
|
98 |
+
"macOS" : "13.0",
|
99 |
+
"tvOS" : "16.0",
|
100 |
+
"visionOS" : "1.0",
|
101 |
+
"watchOS" : "9.0",
|
102 |
+
"iOS" : "16.0",
|
103 |
+
"macCatalyst" : "16.0"
|
104 |
+
},
|
105 |
+
"modelType" : {
|
106 |
+
"name" : "MLModelType_mlProgram"
|
107 |
+
},
|
108 |
+
"userDefinedMetadata" : {
|
109 |
+
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
110 |
+
"com.github.apple.coremltools.source" : "torch==2.1.0",
|
111 |
+
"com.github.apple.coremltools.version" : "7.2"
|
112 |
+
},
|
113 |
+
"inputSchema" : [
|
114 |
+
{
|
115 |
+
"hasShapeFlexibility" : "0",
|
116 |
+
"isOptional" : "0",
|
117 |
+
"dataType" : "Float16",
|
118 |
+
"formattedType" : "MultiArray (Float16 1 × 4096 × 1 × 64)",
|
119 |
+
"shortDescription" : "",
|
120 |
+
"shape" : "[1, 4096, 1, 64]",
|
121 |
+
"name" : "x",
|
122 |
+
"type" : "MultiArray"
|
123 |
+
},
|
124 |
+
{
|
125 |
+
"hasShapeFlexibility" : "0",
|
126 |
+
"isOptional" : "0",
|
127 |
+
"dataType" : "Float16",
|
128 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
129 |
+
"shortDescription" : "",
|
130 |
+
"shape" : "[128, 64]",
|
131 |
+
"name" : "cos",
|
132 |
+
"type" : "MultiArray"
|
133 |
+
},
|
134 |
+
{
|
135 |
+
"hasShapeFlexibility" : "0",
|
136 |
+
"isOptional" : "0",
|
137 |
+
"dataType" : "Float16",
|
138 |
+
"formattedType" : "MultiArray (Float16 128 × 64)",
|
139 |
+
"shortDescription" : "",
|
140 |
+
"shape" : "[128, 64]",
|
141 |
+
"name" : "sin",
|
142 |
+
"type" : "MultiArray"
|
143 |
+
},
|
144 |
+
{
|
145 |
+
"hasShapeFlexibility" : "0",
|
146 |
+
"isOptional" : "0",
|
147 |
+
"dataType" : "Float16",
|
148 |
+
"formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
|
149 |
+
"shortDescription" : "",
|
150 |
+
"shape" : "[1, 1, 64, 512]",
|
151 |
+
"name" : "mask",
|
152 |
+
"type" : "MultiArray"
|
153 |
+
},
|
154 |
+
{
|
155 |
+
"hasShapeFlexibility" : "0",
|
156 |
+
"isOptional" : "1",
|
157 |
+
"dataType" : "Float16",
|
158 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
159 |
+
"shortDescription" : "",
|
160 |
+
"shape" : "[1, 32, 128, 448]",
|
161 |
+
"name" : "k_cache_0",
|
162 |
+
"type" : "MultiArray"
|
163 |
+
},
|
164 |
+
{
|
165 |
+
"hasShapeFlexibility" : "0",
|
166 |
+
"isOptional" : "1",
|
167 |
+
"dataType" : "Float16",
|
168 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
169 |
+
"shortDescription" : "",
|
170 |
+
"shape" : "[1, 32, 128, 448]",
|
171 |
+
"name" : "v_cache_0",
|
172 |
+
"type" : "MultiArray"
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"hasShapeFlexibility" : "0",
|
176 |
+
"isOptional" : "1",
|
177 |
+
"dataType" : "Float16",
|
178 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
179 |
+
"shortDescription" : "",
|
180 |
+
"shape" : "[1, 32, 128, 448]",
|
181 |
+
"name" : "k_cache_1",
|
182 |
+
"type" : "MultiArray"
|
183 |
+
},
|
184 |
+
{
|
185 |
+
"hasShapeFlexibility" : "0",
|
186 |
+
"isOptional" : "1",
|
187 |
+
"dataType" : "Float16",
|
188 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
189 |
+
"shortDescription" : "",
|
190 |
+
"shape" : "[1, 32, 128, 448]",
|
191 |
+
"name" : "v_cache_1",
|
192 |
+
"type" : "MultiArray"
|
193 |
+
},
|
194 |
+
{
|
195 |
+
"hasShapeFlexibility" : "0",
|
196 |
+
"isOptional" : "1",
|
197 |
+
"dataType" : "Float16",
|
198 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
199 |
+
"shortDescription" : "",
|
200 |
+
"shape" : "[1, 32, 128, 448]",
|
201 |
+
"name" : "k_cache_2",
|
202 |
+
"type" : "MultiArray"
|
203 |
+
},
|
204 |
+
{
|
205 |
+
"hasShapeFlexibility" : "0",
|
206 |
+
"isOptional" : "1",
|
207 |
+
"dataType" : "Float16",
|
208 |
+
"formattedType" : "MultiArray (Float16 1 × 32 × 128 × 448)?",
|
209 |
+
"shortDescription" : "",
|
210 |
+
"shape" : "[1, 32, 128, 448]",
|
211 |
+
"name" : "v_cache_2",
|
212 |
+
"type" : "MultiArray"
|
213 |
+
}
|
214 |
+
],
|
215 |
+
"generatedClassName" : "Llama_2_7b_hf_2024_05_25_14_03_55_chunk6",
|
216 |
+
"method" : "predict"
|
217 |
+
}
|
218 |
+
]
|
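The metadata.json above fully describes chunk6's predict interface: a 64-token hidden-state slice x, rotary cos/sin tables, an additive attention mask, optional 448-position K/V caches for the chunk's three blocks (they default to zeros when omitted, per the MIL input defaults), and the matching new_x / new_k_cache_* / new_v_cache_* outputs. A hedged usage sketch with coremltools follows; the zero-filled placeholder inputs and the CPU_AND_NE compute unit are illustrative assumptions, not something this upload prescribes.

import numpy as np
import coremltools as ct

# Load the compiled chunk directly; compute unit choice is illustrative.
chunk = ct.models.CompiledMLModel("Llama-2-7b-hf_chunk6.mlmodelc",
                                  compute_units=ct.ComputeUnit.CPU_AND_NE)

seq, cached = 64, 448
inputs = {
    "x":    np.zeros((1, 4096, 1, seq), dtype=np.float16),
    "cos":  np.zeros((128, seq), dtype=np.float16),
    "sin":  np.zeros((128, seq), dtype=np.float16),
    "mask": np.zeros((1, 1, seq, cached + seq), dtype=np.float16),
    # k_cache_* / v_cache_* are optional; omitting them uses the zero defaults.
}
outputs = chunk.predict(inputs)
print(outputs["new_x"].shape)           # (1, 4096, 1, 64)
print(outputs["new_k_cache_0"].shape)   # (1, 32, 128, 64)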
Llama-2-7b-hf_chunk6.mlmodelc/model.mil
ADDED
@@ -0,0 +1,429 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.40.3"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [128, 64]> cos, tensor<fp16, [1, 32, 128, 448]> k_cache_0, tensor<fp16, [1, 32, 128, 448]> k_cache_1, tensor<fp16, [1, 32, 128, 448]> k_cache_2, tensor<fp16, [1, 1, 64, 512]> mask, tensor<fp16, [128, 64]> sin, tensor<fp16, [1, 32, 128, 448]> v_cache_0, tensor<fp16, [1, 32, 128, 448]> v_cache_1, tensor<fp16, [1, 32, 128, 448]> v_cache_2, tensor<fp16, [1, 4096, 1, 64]> x) [CoreML_InputDefaultValues = dict<tensor<string, []>, tensor<fp32, []>>({{"k_cache_0", 0}, {"k_cache_1", 0}, {"k_cache_2", 0}, {"v_cache_0", 0}, {"v_cache_1", 0}, {"v_cache_2", 0}})] {
|
5 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388736))), name = tensor<string, []>("blocks_0_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
6 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8388864))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777536))), name = tensor<string, []>("blocks_0_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
7 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16777664))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166336))), name = tensor<string, []>("blocks_0_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
8 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_0_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(25166464))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555136))), name = tensor<string, []>("blocks_0_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
9 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(33555264))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099712))), name = tensor<string, []>("blocks_0_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
10 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_0_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56099840))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644288))), name = tensor<string, []>("blocks_0_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
11 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_0_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(78644416))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188864))), name = tensor<string, []>("blocks_0_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
12 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(101188992))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577664))), name = tensor<string, []>("blocks_1_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
13 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(109577792))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966464))), name = tensor<string, []>("blocks_1_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
14 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117966592))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355264))), name = tensor<string, []>("blocks_1_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
15 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_1_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(126355392))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744064))), name = tensor<string, []>("blocks_1_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
16 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(134744192))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288640))), name = tensor<string, []>("blocks_1_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
17 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_1_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(157288768))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833216))), name = tensor<string, []>("blocks_1_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
18 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_1_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(179833344))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377792))), name = tensor<string, []>("blocks_1_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
19 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_q_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(202377920))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766592))), name = tensor<string, []>("blocks_2_attn_q_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
20 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_k_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(210766720))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155392))), name = tensor<string, []>("blocks_2_attn_k_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
21 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_v_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(219155520))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544192))), name = tensor<string, []>("blocks_2_attn_v_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
22 |
+
tensor<fp16, [4096, 4096, 1, 1]> blocks_2_attn_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [8388608]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227544320))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235932992))), name = tensor<string, []>("blocks_2_attn_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 4096, 1, 1])];
|
23 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_1_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235933120))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477568))), name = tensor<string, []>("blocks_2_mlp_fc_1_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
24 |
+
tensor<fp16, [11008, 4096, 1, 1]> blocks_2_mlp_fc_2_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(258477696))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022144))), name = tensor<string, []>("blocks_2_mlp_fc_2_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([11008, 4096, 1, 1])];
|
25 |
+
tensor<fp16, [4096, 11008, 1, 1]> blocks_2_mlp_proj_weight_palettized_cast_fp16 = constexpr_lut_to_dense()[indices = tensor<uint8, [22544384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(281022272))), lut = tensor<fp16, [16]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566720))), name = tensor<string, []>("blocks_2_mlp_proj_weight_palettized_cast_fp16"), shape = tensor<uint32, [4]>([4096, 11008, 1, 1])];
|
26 |
+
tensor<int32, []> var_18 = const()[name = tensor<string, []>("op_18"), val = tensor<int32, []>(3)];
|
27 |
+
tensor<int32, []> var_23 = const()[name = tensor<string, []>("op_23"), val = tensor<int32, []>(-2)];
|
28 |
+
tensor<int32, []> var_25 = const()[name = tensor<string, []>("op_25"), val = tensor<int32, []>(-1)];
|
29 |
+
tensor<int32, []> var_32 = const()[name = tensor<string, []>("op_32"), val = tensor<int32, []>(1)];
|
30 |
+
tensor<bool, []> var_33 = const()[name = tensor<string, []>("op_33"), val = tensor<bool, []>(true)];
|
31 |
+
tensor<fp16, [1, 4096, 1, 64]> var_41_cast_fp16 = mul(x = x, y = x)[name = tensor<string, []>("op_41_cast_fp16")];
|
32 |
+
tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([1])];
|
33 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_1_cast_fp16 = reduce_mean(axes = var_42, keep_dims = var_33, x = var_41_cast_fp16)[name = tensor<string, []>("norm_x_1_cast_fp16")];
|
34 |
+
tensor<fp16, []> var_44_to_fp16 = const()[name = tensor<string, []>("op_44_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
35 |
+
tensor<fp16, [1, 1, 1, 64]> var_45_cast_fp16 = add(x = norm_x_1_cast_fp16, y = var_44_to_fp16)[name = tensor<string, []>("op_45_cast_fp16")];
|
36 |
+
tensor<fp16, []> var_46_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_46_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
37 |
+
tensor<fp16, [1, 1, 1, 64]> var_46_cast_fp16 = rsqrt(epsilon = var_46_epsilon_0_to_fp16, x = var_45_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
|
38 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_1_cast_fp16 = mul(x = x, y = var_46_cast_fp16)[name = tensor<string, []>("x_normed_1_cast_fp16")];
|
39 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303566848)))];
|
40 |
+
tensor<fp16, [1, 4096, 1, 64]> x_5_cast_fp16 = mul(x = x_normed_1_cast_fp16, y = blocks_0_norm_1_weight_to_fp16)[name = tensor<string, []>("x_5_cast_fp16")];
|
41 |
+
tensor<int32, [2]> var_58 = const()[name = tensor<string, []>("op_58"), val = tensor<int32, [2]>([1, 1])];
|
42 |
+
tensor<int32, [2]> var_60 = const()[name = tensor<string, []>("op_60"), val = tensor<int32, [2]>([1, 1])];
|
43 |
+
tensor<string, []> var_62_pad_type_0 = const()[name = tensor<string, []>("op_62_pad_type_0"), val = tensor<string, []>("custom")];
|
44 |
+
tensor<int32, [4]> var_62_pad_0 = const()[name = tensor<string, []>("op_62_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
45 |
+
tensor<fp16, [1, 4096, 1, 64]> var_62_cast_fp16 = conv(dilations = var_60, groups = var_32, pad = var_62_pad_0, pad_type = var_62_pad_type_0, strides = var_58, weight = blocks_0_attn_q_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_62_cast_fp16")];
|
46 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303575104)))];
|
47 |
+
tensor<fp16, [1, 4096, 1, 64]> q_1_cast_fp16 = mul(x = var_62_cast_fp16, y = blocks_0_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_1_cast_fp16")];
|
48 |
+
tensor<int32, [2]> var_66 = const()[name = tensor<string, []>("op_66"), val = tensor<int32, [2]>([1, 1])];
|
49 |
+
tensor<int32, [2]> var_68 = const()[name = tensor<string, []>("op_68"), val = tensor<int32, [2]>([1, 1])];
|
50 |
+
tensor<string, []> var_70_pad_type_0 = const()[name = tensor<string, []>("op_70_pad_type_0"), val = tensor<string, []>("custom")];
|
51 |
+
tensor<int32, [4]> var_70_pad_0 = const()[name = tensor<string, []>("op_70_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
52 |
+
tensor<fp16, [1, 4096, 1, 64]> var_70_cast_fp16 = conv(dilations = var_68, groups = var_32, pad = var_70_pad_0, pad_type = var_70_pad_type_0, strides = var_66, weight = blocks_0_attn_k_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_70_cast_fp16")];
|
53 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303583360)))];
|
54 |
+
tensor<fp16, [1, 4096, 1, 64]> k_1_cast_fp16 = mul(x = var_70_cast_fp16, y = blocks_0_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_1_cast_fp16")];
|
55 |
+
tensor<int32, [2]> var_74 = const()[name = tensor<string, []>("op_74"), val = tensor<int32, [2]>([1, 1])];
|
56 |
+
tensor<int32, [2]> var_76 = const()[name = tensor<string, []>("op_76"), val = tensor<int32, [2]>([1, 1])];
|
57 |
+
tensor<string, []> var_78_pad_type_0 = const()[name = tensor<string, []>("op_78_pad_type_0"), val = tensor<string, []>("custom")];
|
58 |
+
tensor<int32, [4]> var_78_pad_0 = const()[name = tensor<string, []>("op_78_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
59 |
+
tensor<fp16, [1, 4096, 1, 64]> var_78_cast_fp16 = conv(dilations = var_76, groups = var_32, pad = var_78_pad_0, pad_type = var_78_pad_type_0, strides = var_74, weight = blocks_0_attn_v_proj_weight_palettized_cast_fp16, x = x_5_cast_fp16)[name = tensor<string, []>("op_78_cast_fp16")];
|
60 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303591616)))];
|
61 |
+
tensor<fp16, [1, 4096, 1, 64]> v_1_cast_fp16 = mul(x = var_78_cast_fp16, y = blocks_0_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_1_cast_fp16")];
|
62 |
+
tensor<int32, [4]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
63 |
+
tensor<fp16, [1, 32, 128, 64]> q_3_cast_fp16 = reshape(shape = var_80, x = q_1_cast_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
|
64 |
+
tensor<int32, [4]> var_82 = const()[name = tensor<string, []>("op_82"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
65 |
+
tensor<fp16, [1, 32, 128, 64]> k_3_cast_fp16 = reshape(shape = var_82, x = k_1_cast_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
|
66 |
+
tensor<int32, [4]> var_84 = const()[name = tensor<string, []>("op_84"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
67 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_0 = reshape(shape = var_84, x = v_1_cast_fp16)[name = tensor<string, []>("v_3_cast_fp16")];
|
68 |
+
tensor<int32, [4]> var_96_begin_0 = const()[name = tensor<string, []>("op_96_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
69 |
+
tensor<int32, [4]> var_96_end_0 = const()[name = tensor<string, []>("op_96_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
70 |
+
tensor<bool, [4]> var_96_end_mask_0 = const()[name = tensor<string, []>("op_96_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
71 |
+
tensor<fp16, [1, 32, 64, 64]> var_96_cast_fp16 = slice_by_index(begin = var_96_begin_0, end = var_96_end_0, end_mask = var_96_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_96_cast_fp16")];
|
72 |
+
tensor<int32, [4]> var_102_begin_0 = const()[name = tensor<string, []>("op_102_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
73 |
+
tensor<int32, [4]> var_102_end_0 = const()[name = tensor<string, []>("op_102_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
74 |
+
tensor<bool, [4]> var_102_end_mask_0 = const()[name = tensor<string, []>("op_102_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
75 |
+
tensor<fp16, [1, 32, 64, 64]> var_102_cast_fp16 = slice_by_index(begin = var_102_begin_0, end = var_102_end_0, end_mask = var_102_end_mask_0, x = q_3_cast_fp16)[name = tensor<string, []>("op_102_cast_fp16")];
|
76 |
+
tensor<fp16, []> const_3_promoted_to_fp16 = const()[name = tensor<string, []>("const_3_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
77 |
+
tensor<fp16, [1, 32, 64, 64]> var_104_cast_fp16 = mul(x = var_102_cast_fp16, y = const_3_promoted_to_fp16)[name = tensor<string, []>("op_104_cast_fp16")];
|
78 |
+
tensor<bool, []> rotated_1_interleave_0 = const()[name = tensor<string, []>("rotated_1_interleave_0"), val = tensor<bool, []>(false)];
|
79 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_1_cast_fp16 = concat(axis = var_23, interleave = rotated_1_interleave_0, values = (var_104_cast_fp16, var_96_cast_fp16))[name = tensor<string, []>("rotated_1_cast_fp16")];
|
80 |
+
tensor<fp16, [1, 32, 128, 64]> var_107_cast_fp16 = mul(x = q_3_cast_fp16, y = cos)[name = tensor<string, []>("op_107_cast_fp16")];
|
81 |
+
tensor<fp16, [1, 32, 128, 64]> var_108_cast_fp16 = mul(x = rotated_1_cast_fp16, y = sin)[name = tensor<string, []>("op_108_cast_fp16")];
|
82 |
+
tensor<fp16, [1, 32, 128, 64]> roped_1_cast_fp16 = add(x = var_107_cast_fp16, y = var_108_cast_fp16)[name = tensor<string, []>("roped_1_cast_fp16")];
|
83 |
+
tensor<int32, [4]> var_121_begin_0 = const()[name = tensor<string, []>("op_121_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
84 |
+
tensor<int32, [4]> var_121_end_0 = const()[name = tensor<string, []>("op_121_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
85 |
+
tensor<bool, [4]> var_121_end_mask_0 = const()[name = tensor<string, []>("op_121_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
86 |
+
tensor<fp16, [1, 32, 64, 64]> var_121_cast_fp16 = slice_by_index(begin = var_121_begin_0, end = var_121_end_0, end_mask = var_121_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_121_cast_fp16")];
|
87 |
+
tensor<int32, [4]> var_127_begin_0 = const()[name = tensor<string, []>("op_127_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
88 |
+
tensor<int32, [4]> var_127_end_0 = const()[name = tensor<string, []>("op_127_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
89 |
+
tensor<bool, [4]> var_127_end_mask_0 = const()[name = tensor<string, []>("op_127_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
90 |
+
tensor<fp16, [1, 32, 64, 64]> var_127_cast_fp16 = slice_by_index(begin = var_127_begin_0, end = var_127_end_0, end_mask = var_127_end_mask_0, x = k_3_cast_fp16)[name = tensor<string, []>("op_127_cast_fp16")];
|
91 |
+
tensor<fp16, []> const_5_promoted_to_fp16 = const()[name = tensor<string, []>("const_5_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
92 |
+
tensor<fp16, [1, 32, 64, 64]> var_129_cast_fp16 = mul(x = var_127_cast_fp16, y = const_5_promoted_to_fp16)[name = tensor<string, []>("op_129_cast_fp16")];
|
93 |
+
tensor<bool, []> rotated_3_interleave_0 = const()[name = tensor<string, []>("rotated_3_interleave_0"), val = tensor<bool, []>(false)];
|
94 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_3_cast_fp16 = concat(axis = var_23, interleave = rotated_3_interleave_0, values = (var_129_cast_fp16, var_121_cast_fp16))[name = tensor<string, []>("rotated_3_cast_fp16")];
|
95 |
+
tensor<fp16, [1, 32, 128, 64]> var_132_cast_fp16 = mul(x = k_3_cast_fp16, y = cos)[name = tensor<string, []>("op_132_cast_fp16")];
|
96 |
+
tensor<fp16, [1, 32, 128, 64]> var_133_cast_fp16 = mul(x = rotated_3_cast_fp16, y = sin)[name = tensor<string, []>("op_133_cast_fp16")];
|
97 |
+
tensor<fp16, [1, 32, 128, 64]> roped_3_cast_fp16 = add(x = var_132_cast_fp16, y = var_133_cast_fp16)[name = tensor<string, []>("roped_3_cast_fp16")];
|
98 |
+
tensor<bool, []> q_5_interleave_0 = const()[name = tensor<string, []>("q_5_interleave_0"), val = tensor<bool, []>(false)];
|
99 |
+
tensor<fp16, [1, 32, 128, 64]> q_5_cast_fp16 = concat(axis = var_23, interleave = q_5_interleave_0, values = roped_1_cast_fp16)[name = tensor<string, []>("q_5_cast_fp16")];
|
100 |
+
tensor<bool, []> k_5_interleave_0 = const()[name = tensor<string, []>("k_5_interleave_0"), val = tensor<bool, []>(false)];
|
101 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_0 = concat(axis = var_23, interleave = k_5_interleave_0, values = roped_3_cast_fp16)[name = tensor<string, []>("k_5_cast_fp16")];
|
102 |
+
tensor<bool, []> k_7_interleave_0 = const()[name = tensor<string, []>("k_7_interleave_0"), val = tensor<bool, []>(false)];
|
103 |
+
tensor<fp16, [1, 32, 128, 512]> k_7_cast_fp16 = concat(axis = var_25, interleave = k_7_interleave_0, values = (k_cache_0, new_k_cache_0))[name = tensor<string, []>("k_7_cast_fp16")];
|
104 |
+
tensor<bool, []> v_5_interleave_0 = const()[name = tensor<string, []>("v_5_interleave_0"), val = tensor<bool, []>(false)];
|
105 |
+
tensor<fp16, [1, 32, 128, 512]> v_5_cast_fp16 = concat(axis = var_25, interleave = v_5_interleave_0, values = (v_cache_0, new_v_cache_0))[name = tensor<string, []>("v_5_cast_fp16")];
|
106 |
+
tensor<fp16, []> var_155_to_fp16 = const()[name = tensor<string, []>("op_155_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
107 |
+
tensor<fp16, [1, 32, 128, 64]> var_156_cast_fp16 = mul(x = q_5_cast_fp16, y = var_155_to_fp16)[name = tensor<string, []>("op_156_cast_fp16")];
|
108 |
+
tensor<bool, []> attn_weights_1_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_x_0"), val = tensor<bool, []>(true)];
|
109 |
+
tensor<bool, []> attn_weights_1_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_1_transpose_y_0"), val = tensor<bool, []>(false)];
|
110 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_1_cast_fp16 = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_156_cast_fp16, y = k_7_cast_fp16)[name = tensor<string, []>("attn_weights_1_cast_fp16")];
|
111 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_3_cast_fp16 = add(x = attn_weights_1_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_3_cast_fp16")];
|
112 |
+
tensor<fp16, [1, 32, 64, 512]> var_164_cast_fp16 = softmax(axis = var_18, x = attn_weights_3_cast_fp16)[name = tensor<string, []>("op_164_cast_fp16")];
|
113 |
+
tensor<bool, []> attn_1_transpose_x_0 = const()[name = tensor<string, []>("attn_1_transpose_x_0"), val = tensor<bool, []>(false)];
|
114 |
+
tensor<bool, []> attn_1_transpose_y_0 = const()[name = tensor<string, []>("attn_1_transpose_y_0"), val = tensor<bool, []>(true)];
|
115 |
+
tensor<fp16, [1, 32, 128, 64]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = v_5_cast_fp16, y = var_164_cast_fp16)[name = tensor<string, []>("attn_1_cast_fp16")];
|
116 |
+
tensor<int32, [4]> var_168 = const()[name = tensor<string, []>("op_168"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
117 |
+
tensor<fp16, [1, 4096, 1, 64]> input_1_cast_fp16 = reshape(shape = var_168, x = attn_1_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
|
118 |
+
tensor<int32, [2]> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, [2]>([1, 1])];
|
119 |
+
tensor<int32, [2]> var_174 = const()[name = tensor<string, []>("op_174"), val = tensor<int32, [2]>([1, 1])];
|
120 |
+
tensor<string, []> var_176_pad_type_0 = const()[name = tensor<string, []>("op_176_pad_type_0"), val = tensor<string, []>("custom")];
|
121 |
+
tensor<int32, [4]> var_176_pad_0 = const()[name = tensor<string, []>("op_176_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
122 |
+
tensor<fp16, [1, 4096, 1, 64]> var_176_cast_fp16 = conv(dilations = var_174, groups = var_32, pad = var_176_pad_0, pad_type = var_176_pad_type_0, strides = var_172, weight = blocks_0_attn_proj_weight_palettized_cast_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_176_cast_fp16")];
|
123 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303599872)))];
|
124 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_1_cast_fp16 = mul(x = var_176_cast_fp16, y = blocks_0_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_1_cast_fp16")];
|
125 |
+
tensor<fp16, [1, 4096, 1, 64]> x_11_cast_fp16 = add(x = attention_output_1_cast_fp16, y = x)[name = tensor<string, []>("x_11_cast_fp16")];
|
126 |
+
tensor<fp16, [1, 4096, 1, 64]> var_185_cast_fp16 = mul(x = x_11_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("op_185_cast_fp16")];
|
127 |
+
tensor<int32, [1]> var_186 = const()[name = tensor<string, []>("op_186"), val = tensor<int32, [1]>([1])];
|
128 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_3_cast_fp16 = reduce_mean(axes = var_186, keep_dims = var_33, x = var_185_cast_fp16)[name = tensor<string, []>("norm_x_3_cast_fp16")];
|
129 |
+
tensor<fp16, []> var_188_to_fp16 = const()[name = tensor<string, []>("op_188_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
130 |
+
tensor<fp16, [1, 1, 1, 64]> var_189_cast_fp16 = add(x = norm_x_3_cast_fp16, y = var_188_to_fp16)[name = tensor<string, []>("op_189_cast_fp16")];
|
131 |
+
tensor<fp16, []> var_190_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_190_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
132 |
+
tensor<fp16, [1, 1, 1, 64]> var_190_cast_fp16 = rsqrt(epsilon = var_190_epsilon_0_to_fp16, x = var_189_cast_fp16)[name = tensor<string, []>("op_190_cast_fp16")];
|
133 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_5_cast_fp16 = mul(x = x_11_cast_fp16, y = var_190_cast_fp16)[name = tensor<string, []>("x_normed_5_cast_fp16")];
|
134 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303608128)))];
|
135 |
+
tensor<fp16, [1, 4096, 1, 64]> input_3_cast_fp16 = mul(x = x_normed_5_cast_fp16, y = blocks_0_norm_2_weight_to_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
136 |
+
tensor<int32, [2]> var_202 = const()[name = tensor<string, []>("op_202"), val = tensor<int32, [2]>([1, 1])];
|
137 |
+
tensor<int32, [2]> var_204 = const()[name = tensor<string, []>("op_204"), val = tensor<int32, [2]>([1, 1])];
|
138 |
+
tensor<string, []> var_206_pad_type_0 = const()[name = tensor<string, []>("op_206_pad_type_0"), val = tensor<string, []>("custom")];
|
139 |
+
tensor<int32, [4]> var_206_pad_0 = const()[name = tensor<string, []>("op_206_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
140 |
+
tensor<fp16, [1, 11008, 1, 64]> var_206_cast_fp16 = conv(dilations = var_204, groups = var_32, pad = var_206_pad_0, pad_type = var_206_pad_type_0, strides = var_202, weight = blocks_0_mlp_fc_1_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_206_cast_fp16")];
|
141 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303616384)))];
|
142 |
+
tensor<fp16, [1, 11008, 1, 64]> input_5_cast_fp16 = mul(x = var_206_cast_fp16, y = blocks_0_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
143 |
+
tensor<int32, [2]> var_210 = const()[name = tensor<string, []>("op_210"), val = tensor<int32, [2]>([1, 1])];
|
144 |
+
tensor<int32, [2]> var_212 = const()[name = tensor<string, []>("op_212"), val = tensor<int32, [2]>([1, 1])];
|
145 |
+
tensor<string, []> var_214_pad_type_0 = const()[name = tensor<string, []>("op_214_pad_type_0"), val = tensor<string, []>("custom")];
|
146 |
+
tensor<int32, [4]> var_214_pad_0 = const()[name = tensor<string, []>("op_214_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
147 |
+
tensor<fp16, [1, 11008, 1, 64]> var_214_cast_fp16 = conv(dilations = var_212, groups = var_32, pad = var_214_pad_0, pad_type = var_214_pad_type_0, strides = var_210, weight = blocks_0_mlp_fc_2_weight_palettized_cast_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("op_214_cast_fp16")];
|
148 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_0_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303638464)))];
|
149 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_1_cast_fp16 = mul(x = var_214_cast_fp16, y = blocks_0_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_1_cast_fp16")];
|
150 |
+
tensor<fp16, [1, 11008, 1, 64]> var_216_cast_fp16 = silu(x = input_5_cast_fp16)[name = tensor<string, []>("op_216_cast_fp16")];
|
151 |
+
tensor<fp16, [1, 11008, 1, 64]> input_7_cast_fp16 = mul(x = var_216_cast_fp16, y = x_fc_2_1_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
152 |
+
tensor<int32, [2]> var_220 = const()[name = tensor<string, []>("op_220"), val = tensor<int32, [2]>([1, 1])];
|
153 |
+
tensor<int32, [2]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [2]>([1, 1])];
|
154 |
+
tensor<string, []> var_224_pad_type_0 = const()[name = tensor<string, []>("op_224_pad_type_0"), val = tensor<string, []>("custom")];
|
155 |
+
tensor<int32, [4]> var_224_pad_0 = const()[name = tensor<string, []>("op_224_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
156 |
+
tensor<fp16, [1, 4096, 1, 64]> var_224_cast_fp16 = conv(dilations = var_222, groups = var_32, pad = var_224_pad_0, pad_type = var_224_pad_type_0, strides = var_220, weight = blocks_0_mlp_proj_weight_palettized_cast_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("op_224_cast_fp16")];
|
157 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_0_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303660544)))];
|
158 |
+
tensor<fp16, [1, 4096, 1, 64]> var_225_cast_fp16 = mul(x = var_224_cast_fp16, y = blocks_0_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_225_cast_fp16")];
|
159 |
+
tensor<fp16, [1, 4096, 1, 64]> x_15_cast_fp16 = add(x = var_225_cast_fp16, y = x_11_cast_fp16)[name = tensor<string, []>("x_15_cast_fp16")];
|
160 |
+
tensor<int32, []> var_232 = const()[name = tensor<string, []>("op_232"), val = tensor<int32, []>(3)];
|
161 |
+
tensor<int32, []> var_237 = const()[name = tensor<string, []>("op_237"), val = tensor<int32, []>(-2)];
|
162 |
+
tensor<int32, []> var_239 = const()[name = tensor<string, []>("op_239"), val = tensor<int32, []>(-1)];
|
163 |
+
tensor<int32, []> var_246 = const()[name = tensor<string, []>("op_246"), val = tensor<int32, []>(1)];
|
164 |
+
tensor<bool, []> var_247 = const()[name = tensor<string, []>("op_247"), val = tensor<bool, []>(true)];
|
165 |
+
tensor<fp16, [1, 4096, 1, 64]> var_254_cast_fp16 = mul(x = x_15_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("op_254_cast_fp16")];
|
166 |
+
tensor<int32, [1]> var_255 = const()[name = tensor<string, []>("op_255"), val = tensor<int32, [1]>([1])];
|
167 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_5_cast_fp16 = reduce_mean(axes = var_255, keep_dims = var_247, x = var_254_cast_fp16)[name = tensor<string, []>("norm_x_5_cast_fp16")];
|
168 |
+
tensor<fp16, []> var_257_to_fp16 = const()[name = tensor<string, []>("op_257_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
169 |
+
tensor<fp16, [1, 1, 1, 64]> var_258_cast_fp16 = add(x = norm_x_5_cast_fp16, y = var_257_to_fp16)[name = tensor<string, []>("op_258_cast_fp16")];
|
170 |
+
tensor<fp16, []> var_259_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_259_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
171 |
+
tensor<fp16, [1, 1, 1, 64]> var_259_cast_fp16 = rsqrt(epsilon = var_259_epsilon_0_to_fp16, x = var_258_cast_fp16)[name = tensor<string, []>("op_259_cast_fp16")];
|
172 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_9_cast_fp16 = mul(x = x_15_cast_fp16, y = var_259_cast_fp16)[name = tensor<string, []>("x_normed_9_cast_fp16")];
|
173 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303668800)))];
|
174 |
+
tensor<fp16, [1, 4096, 1, 64]> x_19_cast_fp16 = mul(x = x_normed_9_cast_fp16, y = blocks_1_norm_1_weight_to_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
|
175 |
+
tensor<int32, [2]> var_274 = const()[name = tensor<string, []>("op_274"), val = tensor<int32, [2]>([1, 1])];
|
176 |
+
tensor<int32, [2]> var_276 = const()[name = tensor<string, []>("op_276"), val = tensor<int32, [2]>([1, 1])];
|
177 |
+
tensor<string, []> var_278_pad_type_0 = const()[name = tensor<string, []>("op_278_pad_type_0"), val = tensor<string, []>("custom")];
|
178 |
+
tensor<int32, [4]> var_278_pad_0 = const()[name = tensor<string, []>("op_278_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
179 |
+
tensor<fp16, [1, 4096, 1, 64]> var_278_cast_fp16 = conv(dilations = var_276, groups = var_246, pad = var_278_pad_0, pad_type = var_278_pad_type_0, strides = var_274, weight = blocks_1_attn_q_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_278_cast_fp16")];
|
180 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303677056)))];
|
181 |
+
tensor<fp16, [1, 4096, 1, 64]> q_7_cast_fp16 = mul(x = var_278_cast_fp16, y = blocks_1_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
|
182 |
+
tensor<int32, [2]> var_282 = const()[name = tensor<string, []>("op_282"), val = tensor<int32, [2]>([1, 1])];
|
183 |
+
tensor<int32, [2]> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, [2]>([1, 1])];
|
184 |
+
tensor<string, []> var_286_pad_type_0 = const()[name = tensor<string, []>("op_286_pad_type_0"), val = tensor<string, []>("custom")];
|
185 |
+
tensor<int32, [4]> var_286_pad_0 = const()[name = tensor<string, []>("op_286_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
186 |
+
tensor<fp16, [1, 4096, 1, 64]> var_286_cast_fp16 = conv(dilations = var_284, groups = var_246, pad = var_286_pad_0, pad_type = var_286_pad_type_0, strides = var_282, weight = blocks_1_attn_k_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_286_cast_fp16")];
|
187 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303685312)))];
|
188 |
+
tensor<fp16, [1, 4096, 1, 64]> k_9_cast_fp16 = mul(x = var_286_cast_fp16, y = blocks_1_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_9_cast_fp16")];
|
189 |
+
tensor<int32, [2]> var_290 = const()[name = tensor<string, []>("op_290"), val = tensor<int32, [2]>([1, 1])];
|
190 |
+
tensor<int32, [2]> var_292 = const()[name = tensor<string, []>("op_292"), val = tensor<int32, [2]>([1, 1])];
|
191 |
+
tensor<string, []> var_294_pad_type_0 = const()[name = tensor<string, []>("op_294_pad_type_0"), val = tensor<string, []>("custom")];
|
192 |
+
tensor<int32, [4]> var_294_pad_0 = const()[name = tensor<string, []>("op_294_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
193 |
+
tensor<fp16, [1, 4096, 1, 64]> var_294_cast_fp16 = conv(dilations = var_292, groups = var_246, pad = var_294_pad_0, pad_type = var_294_pad_type_0, strides = var_290, weight = blocks_1_attn_v_proj_weight_palettized_cast_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_294_cast_fp16")];
|
194 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303693568)))];
|
195 |
+
tensor<fp16, [1, 4096, 1, 64]> v_7_cast_fp16 = mul(x = var_294_cast_fp16, y = blocks_1_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_7_cast_fp16")];
|
196 |
+
tensor<int32, [4]> var_296 = const()[name = tensor<string, []>("op_296"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
197 |
+
tensor<fp16, [1, 32, 128, 64]> q_9_cast_fp16 = reshape(shape = var_296, x = q_7_cast_fp16)[name = tensor<string, []>("q_9_cast_fp16")];
|
198 |
+
tensor<int32, [4]> var_298 = const()[name = tensor<string, []>("op_298"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
199 |
+
tensor<fp16, [1, 32, 128, 64]> k_11_cast_fp16 = reshape(shape = var_298, x = k_9_cast_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
|
200 |
+
tensor<int32, [4]> var_300 = const()[name = tensor<string, []>("op_300"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
201 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_1 = reshape(shape = var_300, x = v_7_cast_fp16)[name = tensor<string, []>("v_9_cast_fp16")];
|
202 |
+
tensor<int32, [4]> var_312_begin_0 = const()[name = tensor<string, []>("op_312_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
203 |
+
tensor<int32, [4]> var_312_end_0 = const()[name = tensor<string, []>("op_312_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
204 |
+
tensor<bool, [4]> var_312_end_mask_0 = const()[name = tensor<string, []>("op_312_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
205 |
+
tensor<fp16, [1, 32, 64, 64]> var_312_cast_fp16 = slice_by_index(begin = var_312_begin_0, end = var_312_end_0, end_mask = var_312_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_312_cast_fp16")];
|
206 |
+
tensor<int32, [4]> var_318_begin_0 = const()[name = tensor<string, []>("op_318_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
207 |
+
tensor<int32, [4]> var_318_end_0 = const()[name = tensor<string, []>("op_318_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
208 |
+
tensor<bool, [4]> var_318_end_mask_0 = const()[name = tensor<string, []>("op_318_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
209 |
+
tensor<fp16, [1, 32, 64, 64]> var_318_cast_fp16 = slice_by_index(begin = var_318_begin_0, end = var_318_end_0, end_mask = var_318_end_mask_0, x = q_9_cast_fp16)[name = tensor<string, []>("op_318_cast_fp16")];
|
210 |
+
tensor<fp16, []> const_10_promoted_to_fp16 = const()[name = tensor<string, []>("const_10_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
211 |
+
tensor<fp16, [1, 32, 64, 64]> var_320_cast_fp16 = mul(x = var_318_cast_fp16, y = const_10_promoted_to_fp16)[name = tensor<string, []>("op_320_cast_fp16")];
|
212 |
+
tensor<bool, []> rotated_5_interleave_0 = const()[name = tensor<string, []>("rotated_5_interleave_0"), val = tensor<bool, []>(false)];
|
213 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_5_cast_fp16 = concat(axis = var_237, interleave = rotated_5_interleave_0, values = (var_320_cast_fp16, var_312_cast_fp16))[name = tensor<string, []>("rotated_5_cast_fp16")];
|
214 |
+
tensor<fp16, [1, 32, 128, 64]> var_323_cast_fp16 = mul(x = q_9_cast_fp16, y = cos)[name = tensor<string, []>("op_323_cast_fp16")];
|
215 |
+
tensor<fp16, [1, 32, 128, 64]> var_324_cast_fp16 = mul(x = rotated_5_cast_fp16, y = sin)[name = tensor<string, []>("op_324_cast_fp16")];
|
216 |
+
tensor<fp16, [1, 32, 128, 64]> roped_5_cast_fp16 = add(x = var_323_cast_fp16, y = var_324_cast_fp16)[name = tensor<string, []>("roped_5_cast_fp16")];
|
217 |
+
tensor<int32, [4]> var_337_begin_0 = const()[name = tensor<string, []>("op_337_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
218 |
+
tensor<int32, [4]> var_337_end_0 = const()[name = tensor<string, []>("op_337_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
219 |
+
tensor<bool, [4]> var_337_end_mask_0 = const()[name = tensor<string, []>("op_337_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
220 |
+
tensor<fp16, [1, 32, 64, 64]> var_337_cast_fp16 = slice_by_index(begin = var_337_begin_0, end = var_337_end_0, end_mask = var_337_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_337_cast_fp16")];
|
221 |
+
tensor<int32, [4]> var_343_begin_0 = const()[name = tensor<string, []>("op_343_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
222 |
+
tensor<int32, [4]> var_343_end_0 = const()[name = tensor<string, []>("op_343_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
223 |
+
tensor<bool, [4]> var_343_end_mask_0 = const()[name = tensor<string, []>("op_343_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
224 |
+
tensor<fp16, [1, 32, 64, 64]> var_343_cast_fp16 = slice_by_index(begin = var_343_begin_0, end = var_343_end_0, end_mask = var_343_end_mask_0, x = k_11_cast_fp16)[name = tensor<string, []>("op_343_cast_fp16")];
|
225 |
+
tensor<fp16, []> const_12_promoted_to_fp16 = const()[name = tensor<string, []>("const_12_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
226 |
+
tensor<fp16, [1, 32, 64, 64]> var_345_cast_fp16 = mul(x = var_343_cast_fp16, y = const_12_promoted_to_fp16)[name = tensor<string, []>("op_345_cast_fp16")];
|
227 |
+
tensor<bool, []> rotated_7_interleave_0 = const()[name = tensor<string, []>("rotated_7_interleave_0"), val = tensor<bool, []>(false)];
|
228 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_7_cast_fp16 = concat(axis = var_237, interleave = rotated_7_interleave_0, values = (var_345_cast_fp16, var_337_cast_fp16))[name = tensor<string, []>("rotated_7_cast_fp16")];
|
229 |
+
tensor<fp16, [1, 32, 128, 64]> var_348_cast_fp16 = mul(x = k_11_cast_fp16, y = cos)[name = tensor<string, []>("op_348_cast_fp16")];
|
230 |
+
tensor<fp16, [1, 32, 128, 64]> var_349_cast_fp16 = mul(x = rotated_7_cast_fp16, y = sin)[name = tensor<string, []>("op_349_cast_fp16")];
|
231 |
+
tensor<fp16, [1, 32, 128, 64]> roped_7_cast_fp16 = add(x = var_348_cast_fp16, y = var_349_cast_fp16)[name = tensor<string, []>("roped_7_cast_fp16")];
|
232 |
+
tensor<bool, []> q_11_interleave_0 = const()[name = tensor<string, []>("q_11_interleave_0"), val = tensor<bool, []>(false)];
|
233 |
+
tensor<fp16, [1, 32, 128, 64]> q_11_cast_fp16 = concat(axis = var_237, interleave = q_11_interleave_0, values = roped_5_cast_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
|
234 |
+
tensor<bool, []> k_13_interleave_0 = const()[name = tensor<string, []>("k_13_interleave_0"), val = tensor<bool, []>(false)];
|
235 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_1 = concat(axis = var_237, interleave = k_13_interleave_0, values = roped_7_cast_fp16)[name = tensor<string, []>("k_13_cast_fp16")];
|
236 |
+
tensor<bool, []> k_15_interleave_0 = const()[name = tensor<string, []>("k_15_interleave_0"), val = tensor<bool, []>(false)];
|
237 |
+
tensor<fp16, [1, 32, 128, 512]> k_15_cast_fp16 = concat(axis = var_239, interleave = k_15_interleave_0, values = (k_cache_1, new_k_cache_1))[name = tensor<string, []>("k_15_cast_fp16")];
|
238 |
+
tensor<bool, []> v_11_interleave_0 = const()[name = tensor<string, []>("v_11_interleave_0"), val = tensor<bool, []>(false)];
|
239 |
+
tensor<fp16, [1, 32, 128, 512]> v_11_cast_fp16 = concat(axis = var_239, interleave = v_11_interleave_0, values = (v_cache_1, new_v_cache_1))[name = tensor<string, []>("v_11_cast_fp16")];
|
240 |
+
tensor<fp16, []> var_371_to_fp16 = const()[name = tensor<string, []>("op_371_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
241 |
+
tensor<fp16, [1, 32, 128, 64]> var_372_cast_fp16 = mul(x = q_11_cast_fp16, y = var_371_to_fp16)[name = tensor<string, []>("op_372_cast_fp16")];
|
242 |
+
tensor<bool, []> attn_weights_5_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_x_0"), val = tensor<bool, []>(true)];
|
243 |
+
tensor<bool, []> attn_weights_5_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_5_transpose_y_0"), val = tensor<bool, []>(false)];
|
244 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_5_cast_fp16 = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_372_cast_fp16, y = k_15_cast_fp16)[name = tensor<string, []>("attn_weights_5_cast_fp16")];
|
245 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_7_cast_fp16 = add(x = attn_weights_5_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_7_cast_fp16")];
|
246 |
+
tensor<fp16, [1, 32, 64, 512]> var_380_cast_fp16 = softmax(axis = var_232, x = attn_weights_7_cast_fp16)[name = tensor<string, []>("op_380_cast_fp16")];
|
247 |
+
tensor<bool, []> attn_3_transpose_x_0 = const()[name = tensor<string, []>("attn_3_transpose_x_0"), val = tensor<bool, []>(false)];
|
248 |
+
tensor<bool, []> attn_3_transpose_y_0 = const()[name = tensor<string, []>("attn_3_transpose_y_0"), val = tensor<bool, []>(true)];
|
249 |
+
tensor<fp16, [1, 32, 128, 64]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = v_11_cast_fp16, y = var_380_cast_fp16)[name = tensor<string, []>("attn_3_cast_fp16")];
|
250 |
+
tensor<int32, [4]> var_384 = const()[name = tensor<string, []>("op_384"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
251 |
+
tensor<fp16, [1, 4096, 1, 64]> input_9_cast_fp16 = reshape(shape = var_384, x = attn_3_cast_fp16)[name = tensor<string, []>("input_9_cast_fp16")];
|
252 |
+
tensor<int32, [2]> var_388 = const()[name = tensor<string, []>("op_388"), val = tensor<int32, [2]>([1, 1])];
|
253 |
+
tensor<int32, [2]> var_390 = const()[name = tensor<string, []>("op_390"), val = tensor<int32, [2]>([1, 1])];
|
254 |
+
tensor<string, []> var_392_pad_type_0 = const()[name = tensor<string, []>("op_392_pad_type_0"), val = tensor<string, []>("custom")];
|
255 |
+
tensor<int32, [4]> var_392_pad_0 = const()[name = tensor<string, []>("op_392_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
256 |
+
tensor<fp16, [1, 4096, 1, 64]> var_392_cast_fp16 = conv(dilations = var_390, groups = var_246, pad = var_392_pad_0, pad_type = var_392_pad_type_0, strides = var_388, weight = blocks_1_attn_proj_weight_palettized_cast_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("op_392_cast_fp16")];
|
257 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303701824)))];
|
258 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_3_cast_fp16 = mul(x = var_392_cast_fp16, y = blocks_1_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_3_cast_fp16")];
|
259 |
+
tensor<fp16, [1, 4096, 1, 64]> x_25_cast_fp16 = add(x = attention_output_3_cast_fp16, y = x_15_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
|
260 |
+
tensor<fp16, [1, 4096, 1, 64]> var_401_cast_fp16 = mul(x = x_25_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("op_401_cast_fp16")];
|
261 |
+
tensor<int32, [1]> var_402 = const()[name = tensor<string, []>("op_402"), val = tensor<int32, [1]>([1])];
|
262 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_7_cast_fp16 = reduce_mean(axes = var_402, keep_dims = var_247, x = var_401_cast_fp16)[name = tensor<string, []>("norm_x_7_cast_fp16")];
|
263 |
+
tensor<fp16, []> var_404_to_fp16 = const()[name = tensor<string, []>("op_404_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
264 |
+
tensor<fp16, [1, 1, 1, 64]> var_405_cast_fp16 = add(x = norm_x_7_cast_fp16, y = var_404_to_fp16)[name = tensor<string, []>("op_405_cast_fp16")];
|
265 |
+
tensor<fp16, []> var_406_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_406_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
266 |
+
tensor<fp16, [1, 1, 1, 64]> var_406_cast_fp16 = rsqrt(epsilon = var_406_epsilon_0_to_fp16, x = var_405_cast_fp16)[name = tensor<string, []>("op_406_cast_fp16")];
|
267 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_13_cast_fp16 = mul(x = x_25_cast_fp16, y = var_406_cast_fp16)[name = tensor<string, []>("x_normed_13_cast_fp16")];
|
268 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303710080)))];
|
269 |
+
tensor<fp16, [1, 4096, 1, 64]> input_11_cast_fp16 = mul(x = x_normed_13_cast_fp16, y = blocks_1_norm_2_weight_to_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
270 |
+
tensor<int32, [2]> var_418 = const()[name = tensor<string, []>("op_418"), val = tensor<int32, [2]>([1, 1])];
|
271 |
+
tensor<int32, [2]> var_420 = const()[name = tensor<string, []>("op_420"), val = tensor<int32, [2]>([1, 1])];
|
272 |
+
tensor<string, []> var_422_pad_type_0 = const()[name = tensor<string, []>("op_422_pad_type_0"), val = tensor<string, []>("custom")];
|
273 |
+
tensor<int32, [4]> var_422_pad_0 = const()[name = tensor<string, []>("op_422_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
274 |
+
tensor<fp16, [1, 11008, 1, 64]> var_422_cast_fp16 = conv(dilations = var_420, groups = var_246, pad = var_422_pad_0, pad_type = var_422_pad_type_0, strides = var_418, weight = blocks_1_mlp_fc_1_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_422_cast_fp16")];
|
275 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303718336)))];
|
276 |
+
tensor<fp16, [1, 11008, 1, 64]> input_13_cast_fp16 = mul(x = var_422_cast_fp16, y = blocks_1_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
277 |
+
tensor<int32, [2]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [2]>([1, 1])];
|
278 |
+
tensor<int32, [2]> var_428 = const()[name = tensor<string, []>("op_428"), val = tensor<int32, [2]>([1, 1])];
|
279 |
+
tensor<string, []> var_430_pad_type_0 = const()[name = tensor<string, []>("op_430_pad_type_0"), val = tensor<string, []>("custom")];
|
280 |
+
tensor<int32, [4]> var_430_pad_0 = const()[name = tensor<string, []>("op_430_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
281 |
+
tensor<fp16, [1, 11008, 1, 64]> var_430_cast_fp16 = conv(dilations = var_428, groups = var_246, pad = var_430_pad_0, pad_type = var_430_pad_type_0, strides = var_426, weight = blocks_1_mlp_fc_2_weight_palettized_cast_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("op_430_cast_fp16")];
|
282 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_1_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303740416)))];
|
283 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_3_cast_fp16 = mul(x = var_430_cast_fp16, y = blocks_1_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_3_cast_fp16")];
|
284 |
+
tensor<fp16, [1, 11008, 1, 64]> var_432_cast_fp16 = silu(x = input_13_cast_fp16)[name = tensor<string, []>("op_432_cast_fp16")];
|
285 |
+
tensor<fp16, [1, 11008, 1, 64]> input_15_cast_fp16 = mul(x = var_432_cast_fp16, y = x_fc_2_3_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
|
286 |
+
tensor<int32, [2]> var_436 = const()[name = tensor<string, []>("op_436"), val = tensor<int32, [2]>([1, 1])];
|
287 |
+
tensor<int32, [2]> var_438 = const()[name = tensor<string, []>("op_438"), val = tensor<int32, [2]>([1, 1])];
|
288 |
+
tensor<string, []> var_440_pad_type_0 = const()[name = tensor<string, []>("op_440_pad_type_0"), val = tensor<string, []>("custom")];
|
289 |
+
tensor<int32, [4]> var_440_pad_0 = const()[name = tensor<string, []>("op_440_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
290 |
+
tensor<fp16, [1, 4096, 1, 64]> var_440_cast_fp16 = conv(dilations = var_438, groups = var_246, pad = var_440_pad_0, pad_type = var_440_pad_type_0, strides = var_436, weight = blocks_1_mlp_proj_weight_palettized_cast_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("op_440_cast_fp16")];
|
291 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_1_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303762496)))];
|
292 |
+
tensor<fp16, [1, 4096, 1, 64]> var_441_cast_fp16 = mul(x = var_440_cast_fp16, y = blocks_1_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
|
293 |
+
tensor<fp16, [1, 4096, 1, 64]> x_29_cast_fp16 = add(x = var_441_cast_fp16, y = x_25_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
|
294 |
+
tensor<int32, []> var_448 = const()[name = tensor<string, []>("op_448"), val = tensor<int32, []>(3)];
|
295 |
+
tensor<int32, []> var_453 = const()[name = tensor<string, []>("op_453"), val = tensor<int32, []>(-2)];
|
296 |
+
tensor<int32, []> var_455 = const()[name = tensor<string, []>("op_455"), val = tensor<int32, []>(-1)];
|
297 |
+
tensor<int32, []> var_462 = const()[name = tensor<string, []>("op_462"), val = tensor<int32, []>(1)];
|
298 |
+
tensor<bool, []> var_463 = const()[name = tensor<string, []>("op_463"), val = tensor<bool, []>(true)];
|
299 |
+
tensor<fp16, [1, 4096, 1, 64]> var_470_cast_fp16 = mul(x = x_29_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("op_470_cast_fp16")];
|
300 |
+
tensor<int32, [1]> var_471 = const()[name = tensor<string, []>("op_471"), val = tensor<int32, [1]>([1])];
|
301 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_9_cast_fp16 = reduce_mean(axes = var_471, keep_dims = var_463, x = var_470_cast_fp16)[name = tensor<string, []>("norm_x_9_cast_fp16")];
|
302 |
+
tensor<fp16, []> var_473_to_fp16 = const()[name = tensor<string, []>("op_473_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
303 |
+
tensor<fp16, [1, 1, 1, 64]> var_474_cast_fp16 = add(x = norm_x_9_cast_fp16, y = var_473_to_fp16)[name = tensor<string, []>("op_474_cast_fp16")];
|
304 |
+
tensor<fp16, []> var_475_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_475_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
305 |
+
tensor<fp16, [1, 1, 1, 64]> var_475_cast_fp16 = rsqrt(epsilon = var_475_epsilon_0_to_fp16, x = var_474_cast_fp16)[name = tensor<string, []>("op_475_cast_fp16")];
|
306 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_17_cast_fp16 = mul(x = x_29_cast_fp16, y = var_475_cast_fp16)[name = tensor<string, []>("x_normed_17_cast_fp16")];
|
307 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_1_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_1_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303770752)))];
|
308 |
+
tensor<fp16, [1, 4096, 1, 64]> x_33_cast_fp16 = mul(x = x_normed_17_cast_fp16, y = blocks_2_norm_1_weight_to_fp16)[name = tensor<string, []>("x_33_cast_fp16")];
|
309 |
+
tensor<int32, [2]> var_490 = const()[name = tensor<string, []>("op_490"), val = tensor<int32, [2]>([1, 1])];
|
310 |
+
tensor<int32, [2]> var_492 = const()[name = tensor<string, []>("op_492"), val = tensor<int32, [2]>([1, 1])];
|
311 |
+
tensor<string, []> var_494_pad_type_0 = const()[name = tensor<string, []>("op_494_pad_type_0"), val = tensor<string, []>("custom")];
|
312 |
+
tensor<int32, [4]> var_494_pad_0 = const()[name = tensor<string, []>("op_494_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
313 |
+
tensor<fp16, [1, 4096, 1, 64]> var_494_cast_fp16 = conv(dilations = var_492, groups = var_462, pad = var_494_pad_0, pad_type = var_494_pad_type_0, strides = var_490, weight = blocks_2_attn_q_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_494_cast_fp16")];
|
314 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_q_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_q_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303779008)))];
|
315 |
+
tensor<fp16, [1, 4096, 1, 64]> q_13_cast_fp16 = mul(x = var_494_cast_fp16, y = blocks_2_attn_q_proj_output_scales_to_fp16)[name = tensor<string, []>("q_13_cast_fp16")];
|
316 |
+
tensor<int32, [2]> var_498 = const()[name = tensor<string, []>("op_498"), val = tensor<int32, [2]>([1, 1])];
|
317 |
+
tensor<int32, [2]> var_500 = const()[name = tensor<string, []>("op_500"), val = tensor<int32, [2]>([1, 1])];
|
318 |
+
tensor<string, []> var_502_pad_type_0 = const()[name = tensor<string, []>("op_502_pad_type_0"), val = tensor<string, []>("custom")];
|
319 |
+
tensor<int32, [4]> var_502_pad_0 = const()[name = tensor<string, []>("op_502_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
320 |
+
tensor<fp16, [1, 4096, 1, 64]> var_502_cast_fp16 = conv(dilations = var_500, groups = var_462, pad = var_502_pad_0, pad_type = var_502_pad_type_0, strides = var_498, weight = blocks_2_attn_k_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_502_cast_fp16")];
|
321 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_k_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_k_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303787264)))];
|
322 |
+
tensor<fp16, [1, 4096, 1, 64]> k_17_cast_fp16 = mul(x = var_502_cast_fp16, y = blocks_2_attn_k_proj_output_scales_to_fp16)[name = tensor<string, []>("k_17_cast_fp16")];
|
323 |
+
tensor<int32, [2]> var_506 = const()[name = tensor<string, []>("op_506"), val = tensor<int32, [2]>([1, 1])];
|
324 |
+
tensor<int32, [2]> var_508 = const()[name = tensor<string, []>("op_508"), val = tensor<int32, [2]>([1, 1])];
|
325 |
+
tensor<string, []> var_510_pad_type_0 = const()[name = tensor<string, []>("op_510_pad_type_0"), val = tensor<string, []>("custom")];
|
326 |
+
tensor<int32, [4]> var_510_pad_0 = const()[name = tensor<string, []>("op_510_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
327 |
+
tensor<fp16, [1, 4096, 1, 64]> var_510_cast_fp16 = conv(dilations = var_508, groups = var_462, pad = var_510_pad_0, pad_type = var_510_pad_type_0, strides = var_506, weight = blocks_2_attn_v_proj_weight_palettized_cast_fp16, x = x_33_cast_fp16)[name = tensor<string, []>("op_510_cast_fp16")];
|
328 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_v_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_v_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303795520)))];
|
329 |
+
tensor<fp16, [1, 4096, 1, 64]> v_13_cast_fp16 = mul(x = var_510_cast_fp16, y = blocks_2_attn_v_proj_output_scales_to_fp16)[name = tensor<string, []>("v_13_cast_fp16")];
|
330 |
+
tensor<int32, [4]> var_512 = const()[name = tensor<string, []>("op_512"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
331 |
+
tensor<fp16, [1, 32, 128, 64]> q_15_cast_fp16 = reshape(shape = var_512, x = q_13_cast_fp16)[name = tensor<string, []>("q_15_cast_fp16")];
|
332 |
+
tensor<int32, [4]> var_514 = const()[name = tensor<string, []>("op_514"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
333 |
+
tensor<fp16, [1, 32, 128, 64]> k_19_cast_fp16 = reshape(shape = var_514, x = k_17_cast_fp16)[name = tensor<string, []>("k_19_cast_fp16")];
|
334 |
+
tensor<int32, [4]> var_516 = const()[name = tensor<string, []>("op_516"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
335 |
+
tensor<fp16, [1, 32, 128, 64]> new_v_cache_2 = reshape(shape = var_516, x = v_13_cast_fp16)[name = tensor<string, []>("v_15_cast_fp16")];
|
336 |
+
tensor<int32, [4]> var_528_begin_0 = const()[name = tensor<string, []>("op_528_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
337 |
+
tensor<int32, [4]> var_528_end_0 = const()[name = tensor<string, []>("op_528_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
338 |
+
tensor<bool, [4]> var_528_end_mask_0 = const()[name = tensor<string, []>("op_528_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
339 |
+
tensor<fp16, [1, 32, 64, 64]> var_528_cast_fp16 = slice_by_index(begin = var_528_begin_0, end = var_528_end_0, end_mask = var_528_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_528_cast_fp16")];
|
340 |
+
tensor<int32, [4]> var_534_begin_0 = const()[name = tensor<string, []>("op_534_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
341 |
+
tensor<int32, [4]> var_534_end_0 = const()[name = tensor<string, []>("op_534_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
342 |
+
tensor<bool, [4]> var_534_end_mask_0 = const()[name = tensor<string, []>("op_534_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
343 |
+
tensor<fp16, [1, 32, 64, 64]> var_534_cast_fp16 = slice_by_index(begin = var_534_begin_0, end = var_534_end_0, end_mask = var_534_end_mask_0, x = q_15_cast_fp16)[name = tensor<string, []>("op_534_cast_fp16")];
|
344 |
+
tensor<fp16, []> const_17_promoted_to_fp16 = const()[name = tensor<string, []>("const_17_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
345 |
+
tensor<fp16, [1, 32, 64, 64]> var_536_cast_fp16 = mul(x = var_534_cast_fp16, y = const_17_promoted_to_fp16)[name = tensor<string, []>("op_536_cast_fp16")];
|
346 |
+
tensor<bool, []> rotated_9_interleave_0 = const()[name = tensor<string, []>("rotated_9_interleave_0"), val = tensor<bool, []>(false)];
|
347 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_9_cast_fp16 = concat(axis = var_453, interleave = rotated_9_interleave_0, values = (var_536_cast_fp16, var_528_cast_fp16))[name = tensor<string, []>("rotated_9_cast_fp16")];
|
348 |
+
tensor<fp16, [1, 32, 128, 64]> var_539_cast_fp16 = mul(x = q_15_cast_fp16, y = cos)[name = tensor<string, []>("op_539_cast_fp16")];
|
349 |
+
tensor<fp16, [1, 32, 128, 64]> var_540_cast_fp16 = mul(x = rotated_9_cast_fp16, y = sin)[name = tensor<string, []>("op_540_cast_fp16")];
|
350 |
+
tensor<fp16, [1, 32, 128, 64]> roped_9_cast_fp16 = add(x = var_539_cast_fp16, y = var_540_cast_fp16)[name = tensor<string, []>("roped_9_cast_fp16")];
|
351 |
+
tensor<int32, [4]> var_553_begin_0 = const()[name = tensor<string, []>("op_553_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
352 |
+
tensor<int32, [4]> var_553_end_0 = const()[name = tensor<string, []>("op_553_end_0"), val = tensor<int32, [4]>([1, 32, 64, 64])];
|
353 |
+
tensor<bool, [4]> var_553_end_mask_0 = const()[name = tensor<string, []>("op_553_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
|
354 |
+
tensor<fp16, [1, 32, 64, 64]> var_553_cast_fp16 = slice_by_index(begin = var_553_begin_0, end = var_553_end_0, end_mask = var_553_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_553_cast_fp16")];
|
355 |
+
tensor<int32, [4]> var_559_begin_0 = const()[name = tensor<string, []>("op_559_begin_0"), val = tensor<int32, [4]>([0, 0, 64, 0])];
|
356 |
+
tensor<int32, [4]> var_559_end_0 = const()[name = tensor<string, []>("op_559_end_0"), val = tensor<int32, [4]>([1, 32, 128, 64])];
|
357 |
+
tensor<bool, [4]> var_559_end_mask_0 = const()[name = tensor<string, []>("op_559_end_mask_0"), val = tensor<bool, [4]>([true, true, true, true])];
|
358 |
+
tensor<fp16, [1, 32, 64, 64]> var_559_cast_fp16 = slice_by_index(begin = var_559_begin_0, end = var_559_end_0, end_mask = var_559_end_mask_0, x = k_19_cast_fp16)[name = tensor<string, []>("op_559_cast_fp16")];
|
359 |
+
tensor<fp16, []> const_19_promoted_to_fp16 = const()[name = tensor<string, []>("const_19_promoted_to_fp16"), val = tensor<fp16, []>(-0x1p+0)];
|
360 |
+
tensor<fp16, [1, 32, 64, 64]> var_561_cast_fp16 = mul(x = var_559_cast_fp16, y = const_19_promoted_to_fp16)[name = tensor<string, []>("op_561_cast_fp16")];
|
361 |
+
tensor<bool, []> rotated_interleave_0 = const()[name = tensor<string, []>("rotated_interleave_0"), val = tensor<bool, []>(false)];
|
362 |
+
tensor<fp16, [1, 32, 128, 64]> rotated_cast_fp16 = concat(axis = var_453, interleave = rotated_interleave_0, values = (var_561_cast_fp16, var_553_cast_fp16))[name = tensor<string, []>("rotated_cast_fp16")];
|
363 |
+
tensor<fp16, [1, 32, 128, 64]> var_564_cast_fp16 = mul(x = k_19_cast_fp16, y = cos)[name = tensor<string, []>("op_564_cast_fp16")];
|
364 |
+
tensor<fp16, [1, 32, 128, 64]> var_565_cast_fp16 = mul(x = rotated_cast_fp16, y = sin)[name = tensor<string, []>("op_565_cast_fp16")];
|
365 |
+
tensor<fp16, [1, 32, 128, 64]> roped_cast_fp16 = add(x = var_564_cast_fp16, y = var_565_cast_fp16)[name = tensor<string, []>("roped_cast_fp16")];
|
366 |
+
tensor<bool, []> q_interleave_0 = const()[name = tensor<string, []>("q_interleave_0"), val = tensor<bool, []>(false)];
|
367 |
+
tensor<fp16, [1, 32, 128, 64]> q_cast_fp16 = concat(axis = var_453, interleave = q_interleave_0, values = roped_9_cast_fp16)[name = tensor<string, []>("q_cast_fp16")];
|
368 |
+
tensor<bool, []> k_21_interleave_0 = const()[name = tensor<string, []>("k_21_interleave_0"), val = tensor<bool, []>(false)];
|
369 |
+
tensor<fp16, [1, 32, 128, 64]> new_k_cache_2 = concat(axis = var_453, interleave = k_21_interleave_0, values = roped_cast_fp16)[name = tensor<string, []>("k_21_cast_fp16")];
|
370 |
+
tensor<bool, []> k_interleave_0 = const()[name = tensor<string, []>("k_interleave_0"), val = tensor<bool, []>(false)];
|
371 |
+
tensor<fp16, [1, 32, 128, 512]> k_cast_fp16 = concat(axis = var_455, interleave = k_interleave_0, values = (k_cache_2, new_k_cache_2))[name = tensor<string, []>("k_cast_fp16")];
|
372 |
+
tensor<bool, []> v_interleave_0 = const()[name = tensor<string, []>("v_interleave_0"), val = tensor<bool, []>(false)];
|
373 |
+
tensor<fp16, [1, 32, 128, 512]> v_cast_fp16 = concat(axis = var_455, interleave = v_interleave_0, values = (v_cache_2, new_v_cache_2))[name = tensor<string, []>("v_cast_fp16")];
|
374 |
+
tensor<fp16, []> var_587_to_fp16 = const()[name = tensor<string, []>("op_587_to_fp16"), val = tensor<fp16, []>(0x1.6ap-4)];
|
375 |
+
tensor<fp16, [1, 32, 128, 64]> var_588_cast_fp16 = mul(x = q_cast_fp16, y = var_587_to_fp16)[name = tensor<string, []>("op_588_cast_fp16")];
|
376 |
+
tensor<bool, []> attn_weights_9_transpose_x_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_x_0"), val = tensor<bool, []>(true)];
|
377 |
+
tensor<bool, []> attn_weights_9_transpose_y_0 = const()[name = tensor<string, []>("attn_weights_9_transpose_y_0"), val = tensor<bool, []>(false)];
|
378 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_9_cast_fp16 = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_588_cast_fp16, y = k_cast_fp16)[name = tensor<string, []>("attn_weights_9_cast_fp16")];
|
379 |
+
tensor<fp16, [1, 32, 64, 512]> attn_weights_cast_fp16 = add(x = attn_weights_9_cast_fp16, y = mask)[name = tensor<string, []>("attn_weights_cast_fp16")];
|
380 |
+
tensor<fp16, [1, 32, 64, 512]> var_596_cast_fp16 = softmax(axis = var_448, x = attn_weights_cast_fp16)[name = tensor<string, []>("op_596_cast_fp16")];
|
381 |
+
tensor<bool, []> attn_5_transpose_x_0 = const()[name = tensor<string, []>("attn_5_transpose_x_0"), val = tensor<bool, []>(false)];
|
382 |
+
tensor<bool, []> attn_5_transpose_y_0 = const()[name = tensor<string, []>("attn_5_transpose_y_0"), val = tensor<bool, []>(true)];
|
383 |
+
tensor<fp16, [1, 32, 128, 64]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = v_cast_fp16, y = var_596_cast_fp16)[name = tensor<string, []>("attn_5_cast_fp16")];
|
384 |
+
tensor<int32, [4]> var_600 = const()[name = tensor<string, []>("op_600"), val = tensor<int32, [4]>([1, 4096, 1, -1])];
|
385 |
+
tensor<fp16, [1, 4096, 1, 64]> input_17_cast_fp16 = reshape(shape = var_600, x = attn_5_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
|
386 |
+
tensor<int32, [2]> var_604 = const()[name = tensor<string, []>("op_604"), val = tensor<int32, [2]>([1, 1])];
|
387 |
+
tensor<int32, [2]> var_606 = const()[name = tensor<string, []>("op_606"), val = tensor<int32, [2]>([1, 1])];
|
388 |
+
tensor<string, []> var_608_pad_type_0 = const()[name = tensor<string, []>("op_608_pad_type_0"), val = tensor<string, []>("custom")];
|
389 |
+
tensor<int32, [4]> var_608_pad_0 = const()[name = tensor<string, []>("op_608_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
390 |
+
tensor<fp16, [1, 4096, 1, 64]> var_608_cast_fp16 = conv(dilations = var_606, groups = var_462, pad = var_608_pad_0, pad_type = var_608_pad_type_0, strides = var_604, weight = blocks_2_attn_proj_weight_palettized_cast_fp16, x = input_17_cast_fp16)[name = tensor<string, []>("op_608_cast_fp16")];
|
391 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_attn_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303803776)))];
|
392 |
+
tensor<fp16, [1, 4096, 1, 64]> attention_output_cast_fp16 = mul(x = var_608_cast_fp16, y = blocks_2_attn_proj_output_scales_to_fp16)[name = tensor<string, []>("attention_output_cast_fp16")];
|
393 |
+
tensor<fp16, [1, 4096, 1, 64]> x_39_cast_fp16 = add(x = attention_output_cast_fp16, y = x_29_cast_fp16)[name = tensor<string, []>("x_39_cast_fp16")];
|
394 |
+
tensor<fp16, [1, 4096, 1, 64]> var_617_cast_fp16 = mul(x = x_39_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_617_cast_fp16")];
|
395 |
+
tensor<int32, [1]> var_618 = const()[name = tensor<string, []>("op_618"), val = tensor<int32, [1]>([1])];
|
396 |
+
tensor<fp16, [1, 1, 1, 64]> norm_x_cast_fp16 = reduce_mean(axes = var_618, keep_dims = var_463, x = var_617_cast_fp16)[name = tensor<string, []>("norm_x_cast_fp16")];
|
397 |
+
tensor<fp16, []> var_620_to_fp16 = const()[name = tensor<string, []>("op_620_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
398 |
+
tensor<fp16, [1, 1, 1, 64]> var_621_cast_fp16 = add(x = norm_x_cast_fp16, y = var_620_to_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
399 |
+
tensor<fp16, []> var_622_epsilon_0_to_fp16 = const()[name = tensor<string, []>("op_622_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
400 |
+
tensor<fp16, [1, 1, 1, 64]> var_622_cast_fp16 = rsqrt(epsilon = var_622_epsilon_0_to_fp16, x = var_621_cast_fp16)[name = tensor<string, []>("op_622_cast_fp16")];
|
401 |
+
tensor<fp16, [1, 4096, 1, 64]> x_normed_21_cast_fp16 = mul(x = x_39_cast_fp16, y = var_622_cast_fp16)[name = tensor<string, []>("x_normed_21_cast_fp16")];
|
402 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_norm_2_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_norm_2_weight_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303812032)))];
|
403 |
+
tensor<fp16, [1, 4096, 1, 64]> input_19_cast_fp16 = mul(x = x_normed_21_cast_fp16, y = blocks_2_norm_2_weight_to_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
404 |
+
tensor<int32, [2]> var_634 = const()[name = tensor<string, []>("op_634"), val = tensor<int32, [2]>([1, 1])];
|
405 |
+
tensor<int32, [2]> var_636 = const()[name = tensor<string, []>("op_636"), val = tensor<int32, [2]>([1, 1])];
|
406 |
+
tensor<string, []> var_638_pad_type_0 = const()[name = tensor<string, []>("op_638_pad_type_0"), val = tensor<string, []>("custom")];
|
407 |
+
tensor<int32, [4]> var_638_pad_0 = const()[name = tensor<string, []>("op_638_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
408 |
+
tensor<fp16, [1, 11008, 1, 64]> var_638_cast_fp16 = conv(dilations = var_636, groups = var_462, pad = var_638_pad_0, pad_type = var_638_pad_type_0, strides = var_634, weight = blocks_2_mlp_fc_1_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_638_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_1_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_1_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303820288)))];
|
410 |
+
tensor<fp16, [1, 11008, 1, 64]> input_21_cast_fp16 = mul(x = var_638_cast_fp16, y = blocks_2_mlp_fc_1_output_scales_to_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
|
411 |
+
tensor<int32, [2]> var_642 = const()[name = tensor<string, []>("op_642"), val = tensor<int32, [2]>([1, 1])];
|
412 |
+
tensor<int32, [2]> var_644 = const()[name = tensor<string, []>("op_644"), val = tensor<int32, [2]>([1, 1])];
|
413 |
+
tensor<string, []> var_646_pad_type_0 = const()[name = tensor<string, []>("op_646_pad_type_0"), val = tensor<string, []>("custom")];
|
414 |
+
tensor<int32, [4]> var_646_pad_0 = const()[name = tensor<string, []>("op_646_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
415 |
+
tensor<fp16, [1, 11008, 1, 64]> var_646_cast_fp16 = conv(dilations = var_644, groups = var_462, pad = var_646_pad_0, pad_type = var_646_pad_type_0, strides = var_642, weight = blocks_2_mlp_fc_2_weight_palettized_cast_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("op_646_cast_fp16")];
|
416 |
+
tensor<fp16, [1, 11008, 1, 1]> blocks_2_mlp_fc_2_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_fc_2_output_scales_to_fp16"), val = tensor<fp16, [1, 11008, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303842368)))];
|
417 |
+
tensor<fp16, [1, 11008, 1, 64]> x_fc_2_cast_fp16 = mul(x = var_646_cast_fp16, y = blocks_2_mlp_fc_2_output_scales_to_fp16)[name = tensor<string, []>("x_fc_2_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 11008, 1, 64]> var_648_cast_fp16 = silu(x = input_21_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
419 |
+
tensor<fp16, [1, 11008, 1, 64]> input_cast_fp16 = mul(x = var_648_cast_fp16, y = x_fc_2_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
420 |
+
tensor<int32, [2]> var_652 = const()[name = tensor<string, []>("op_652"), val = tensor<int32, [2]>([1, 1])];
|
421 |
+
tensor<int32, [2]> var_654 = const()[name = tensor<string, []>("op_654"), val = tensor<int32, [2]>([1, 1])];
|
422 |
+
tensor<string, []> var_656_pad_type_0 = const()[name = tensor<string, []>("op_656_pad_type_0"), val = tensor<string, []>("custom")];
|
423 |
+
tensor<int32, [4]> var_656_pad_0 = const()[name = tensor<string, []>("op_656_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
424 |
+
tensor<fp16, [1, 4096, 1, 64]> var_656_cast_fp16 = conv(dilations = var_654, groups = var_462, pad = var_656_pad_0, pad_type = var_656_pad_type_0, strides = var_652, weight = blocks_2_mlp_proj_weight_palettized_cast_fp16, x = input_cast_fp16)[name = tensor<string, []>("op_656_cast_fp16")];
|
425 |
+
tensor<fp16, [1, 4096, 1, 1]> blocks_2_mlp_proj_output_scales_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_proj_output_scales_to_fp16"), val = tensor<fp16, [1, 4096, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(303864448)))];
|
426 |
+
tensor<fp16, [1, 4096, 1, 64]> var_657_cast_fp16 = mul(x = var_656_cast_fp16, y = blocks_2_mlp_proj_output_scales_to_fp16)[name = tensor<string, []>("op_657_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 4096, 1, 64]> new_x = add(x = var_657_cast_fp16, y = x_39_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
428 |
+
} -> (new_x, new_k_cache_0, new_k_cache_1, new_k_cache_2, new_v_cache_0, new_v_cache_1, new_v_cache_2);
|
429 |
+
}
|
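For orientation, the model.mil ops listed above spell out one pre-norm transformer block repeated per layer: RMSNorm, 1x1-conv Q/K/V projections followed by per-channel output scales (the weights are palettized), rotate-half RoPE, attention against the concatenated K/V cache, an output projection with a residual add, then a second RMSNorm and a SwiGLU MLP. The NumPy sketch below restates that structure for readability only; it is not code shipped with this model, and the parameter names (wq, wk, wv, wo, w_fc1, w_fc2, w_proj, norm_1, norm_2) and the (channels, sequence) layout are assumptions chosen to mirror the listing.

# Illustrative sketch only -- a NumPy restatement of the block structure visible in the
# MIL ops above. Parameter names and layout are assumptions, not model identifiers.
import numpy as np

def rms_norm(x, weight, eps=1e-5):            # eps mirrors the fp16 constant 0x1.5p-17
    # x: (d_model, seq); mean of x*x over channels, rsqrt, then per-channel weight
    norm_x = (x * x).mean(axis=0, keepdims=True)
    return x / np.sqrt(norm_x + eps) * weight[:, None]

def rope(x, cos, sin):
    # x: (n_heads, head_dim, seq); rotate-half, matching the slice / negate / concat ops
    half = x.shape[1] // 2
    rotated = np.concatenate([-x[:, half:, :], x[:, :half, :]], axis=1)
    return x * cos + rotated * sin

def silu(x):
    return x / (1.0 + np.exp(-x))

def block(x, p, cos, sin, mask, k_cache, v_cache, n_heads=32, head_dim=128):
    # x: (4096, seq); k_cache, v_cache: (n_heads, head_dim, cached_len)
    # cos, sin broadcast to (head_dim, seq); mask: (seq, cached_len + seq)
    h = rms_norm(x, p["norm_1"])
    q = (p["wq"] @ h).reshape(n_heads, head_dim, -1)   # the per-channel output scales of
    k = (p["wk"] @ h).reshape(n_heads, head_dim, -1)   # the listing are folded into the
    v = (p["wv"] @ h).reshape(n_heads, head_dim, -1)   # weight matrices here
    q, k = rope(q, cos, sin), rope(k, cos, sin)
    k_all = np.concatenate([k_cache, k], axis=2)       # append to the K/V cache
    v_all = np.concatenate([v_cache, v], axis=2)
    scores = np.einsum("hdq,hdk->hqk", q * head_dim ** -0.5, k_all) + mask
    probs = np.exp(scores - scores.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)         # softmax over the key axis
    attn = np.einsum("hdk,hqk->hdq", v_all, probs).reshape(n_heads * head_dim, -1)
    x = x + p["wo"] @ attn                             # attention residual
    h = rms_norm(x, p["norm_2"])
    x = x + p["w_proj"] @ (silu(p["w_fc1"] @ h) * (p["w_fc2"] @ h))  # SwiGLU MLP residual
    return x, k, v                                     # new_x, new_k_cache, new_v_cache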
Llama-2-7b-hf_chunk6.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:646d17c5d6d62e055abb88615254cb2d8205cd46a7b98faa734136f30c8ca26a
|
3 |
+
size 303872704
|
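The three lines above are a Git LFS pointer, not the tensor data itself: the actual weight.bin is fetched by LFS and should match the recorded sha256 and byte size. A minimal check along those lines, assuming the file has been downloaded to the path shown:

# Minimal sketch: verify a downloaded weight.bin against the oid/size in its LFS pointer.
import hashlib

def verify(path, expected_sha256, expected_size):
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):   # stream in 1 MiB pieces
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_sha256 and size == expected_size

print(verify("Llama-2-7b-hf_chunk6.mlmodelc/weights/weight.bin",
             "646d17c5d6d62e055abb88615254cb2d8205cd46a7b98faa734136f30c8ca26a",
             303872704))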