andylizf committed
Commit 00cdecc · verified · 1 Parent(s): b2f9819

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. recall_acc_results/_a/nprobe_vs_metrics_nlist8192.png +3 -0
  2. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sm75.cu +307 -0
  3. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_slicedk_sm75.cu +88 -0
  4. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_s8t_f16n_f32t_mixed_input_tensor_op_f32_sm80.cu +97 -0
  5. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_fast_f32_ls_sm80.cu +175 -0
  6. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/herk_cf64h_cf64n_tensor_op_f64_sm80.cu +175 -0
  7. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_dag.cu +170 -0
  8. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_alignx_tensor_op_f32_warpspecialized.cu +167 -0
  9. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64t_cf64n_tensor_op_f64_grouped_sm80.cu +168 -0
  10. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/trmm_f64_f64_f64_tensor_op_f64_sm90.cu +126 -0
  11. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/trmm_f64n_f64n_f64t_tensor_op_f64_ls_sm80.cu +414 -0
  12. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h +435 -0
  13. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm75.cu +2128 -0
  14. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/conv3d_operation_profiler.h +449 -0
  15. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cublas_helpers.h +456 -0
  16. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cudnn_helpers.h +590 -0
  17. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cutlass_profiler.h +93 -0
  18. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/debug.h +56 -0
  19. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/device_context.h +136 -0
  20. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/enumerated_types.h +169 -0
  21. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h +268 -0
  22. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gpu_timer.h +72 -0
  23. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/options.h +345 -0
  24. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_report.h +127 -0
  25. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_result.h +128 -0
  26. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_2k_operation_profiler.h +229 -0
  27. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_k_operation_profiler.h +227 -0
  28. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/sparse_gemm_operation_profiler.h +214 -0
  29. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/trmm_operation_profiler.h +222 -0
  30. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/conv2d_operation_profiler.cu +1510 -0
  31. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/cudnn_helpers.cpp +496 -0
  32. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/cutlass_profiler.cu +214 -0
  33. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/device_allocation.cu +2483 -0
  34. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/device_context.cu +245 -0
  35. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/enumerated_types.cpp +275 -0
  36. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/gemm_operation_profiler.cu +1298 -0
  37. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/gpu_timer.cpp +113 -0
  38. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/options.cu +899 -0
  39. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/performance_report.cpp +505 -0
  40. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/performance_result.cu +61 -0
  41. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/problem_space.cpp +1263 -0
  42. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/rank_2k_operation_profiler.cu +752 -0
  43. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/rank_k_operation_profiler.cu +737 -0
  44. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/sparse_gemm_operation_profiler.cu +598 -0
  45. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/symm_operation_profiler.cu +790 -0
  46. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/trmm_operation_profiler.cu +728 -0
  47. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/util/include/cutlass/util/GPU_Clock.hpp +67 -0
  48. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/util/include/cutlass/util/command_line.h +313 -0
  49. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/util/include/cutlass/util/cublas_wrappers.hpp +526 -0
  50. sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/util/include/cutlass/util/debug.h +143 -0
recall_acc_results/_a/nprobe_vs_metrics_nlist8192.png ADDED

Git LFS Details

  • SHA256: 4d5ca2fb92d5a3f7462a46481af81e9d0a1f057c37ca2d218494d65716c49a04
  • Pointer size: 131 Bytes
  • Size of remote file: 168 kB
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/gemm_f16n_f16n_f16t_tensor_op_f32_sm75.cu ADDED
@@ -0,0 +1,307 @@
+ /***************************************************************************************************
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+ /*! \file
+ \brief Tests for device-wide GEMM interface
+ */
+
+ #include <iostream>
+
+ #include "cutlass/cutlass.h"
+ #include "cutlass/gemm/device/gemm.h"
+
+ #include "../../common/cutlass_unit_test.h"
+
+ #include "cutlass/util/host_tensor.h"
+ #include "cutlass/util/tensor_view_io.h"
+ #include "cutlass/util/reference/host/tensor_fill.h"
+ #include "cutlass/util/reference/host/tensor_copy.h"
+ #include "cutlass/util/reference/host/tensor_compare.h"
+ #include "cutlass/util/reference/host/gemm.h"
+
+ #include "testbed.h"
+
+ #if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED)
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x256x32_64x64x32) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<128, 256, 32>,
+ cutlass::gemm::GemmShape<64, 64, 32>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 128 / cutlass::sizeof_bits<ElementOutput>::value,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 2
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x256x32_64x64x32_brief) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<128, 256, 32>
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 256x128x32_64x64x32) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<256, 128, 32>,
+ cutlass::gemm::GemmShape<64, 64, 32>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 128 / cutlass::sizeof_bits<ElementOutput>::value,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 2
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x128x32_64x64x32) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<128, 128, 32>,
+ cutlass::gemm::GemmShape<64, 64, 32>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 128 / cutlass::sizeof_bits<ElementOutput>::value,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 2
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x128x32_64x64x32_brief) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<128, 128, 32>
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x128x32_32x64x32) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<64, 128, 32>,
+ cutlass::gemm::GemmShape<32, 64, 32>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 128 / cutlass::sizeof_bits<ElementOutput>::value,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 2
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x128x32_32x64x32_brief) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<64, 128, 32>,
+ cutlass::gemm::GemmShape<32, 64, 32>
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 128x64x32_64x32x32) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<128, 64, 32>,
+ cutlass::gemm::GemmShape<64, 32, 32>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 128 / cutlass::sizeof_bits<ElementOutput>::value,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 2
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ TEST(SM75_Device_Gemm_f16n_f16n_f16t_tensor_op_f32, 64x64x32_32x32x32) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<64, 64, 32>,
+ cutlass::gemm::GemmShape<32, 32, 32>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 128 / cutlass::sizeof_bits<ElementOutput>::value,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 2
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ #endif
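Note added for context (not part of the uploaded file): the TestAllGemm<Gemm>() calls above sweep a range of problem sizes through the same device-side kernel. Outside the GTest harness, a Gemm type configured like the ones in this file is typically driven along the lines of the sketch below. It assumes the CUTLASS 2.x cutlass::gemm::device::Gemm interface; the function name run_sm75_f16_gemm is illustrative, and the tile shapes are left at the library defaults rather than the explicit shapes pinned by the tests.

    #include "cutlass/cutlass.h"
    #include "cutlass/gemm/device/gemm.h"

    // Column-major f16 A (M x K) and B (K x N), row-major f16 C/D (M x N),
    // f32 accumulation on SM75 tensor cores -- the same element/layout choices
    // as the tests above. Pointers and leading dimensions must satisfy the
    // kernel's alignment requirements (typically 8 half_t elements here).
    cutlass::Status run_sm75_f16_gemm(int M, int N, int K,
                                      cutlass::half_t const *A, int lda,
                                      cutlass::half_t const *B, int ldb,
                                      cutlass::half_t *C, int ldc,
                                      float alpha, float beta) {
      using Gemm = cutlass::gemm::device::Gemm<
          cutlass::half_t, cutlass::layout::ColumnMajor,   // A
          cutlass::half_t, cutlass::layout::ColumnMajor,   // B
          cutlass::half_t, cutlass::layout::RowMajor,      // C / D
          float,                                           // accumulator
          cutlass::arch::OpClassTensorOp,
          cutlass::arch::Sm75>;

      Gemm gemm_op;
      // Computes D = alpha * A * B + beta * C, writing D over C here.
      Gemm::Arguments args({M, N, K}, {A, lda}, {B, ldb}, {C, ldc}, {C, ldc}, {alpha, beta});
      return gemm_op(args);  // cutlass::Status::kSuccess on a successful launch
    }

The unit tests additionally compare the device result against the host reference GEMM pulled in by the includes above; this sketch only launches the kernel.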
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/gemm_f16t_f16n_f16t_tensor_op_f16_slicedk_sm75.cu ADDED
@@ -0,0 +1,88 @@
+ /***************************************************************************************************
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+ /*! \file
+ \brief Tests for device-wide GEMM interface
+ */
+
+ #include <iostream>
+
+ #include "cutlass/cutlass.h"
+ #include "cutlass/gemm/device/gemm.h"
+
+ #include "../../common/cutlass_unit_test.h"
+
+ #include "cutlass/util/host_tensor.h"
+ #include "cutlass/util/tensor_view_io.h"
+ #include "cutlass/util/reference/host/tensor_fill.h"
+ #include "cutlass/util/reference/host/tensor_copy.h"
+ #include "cutlass/util/reference/host/tensor_compare.h"
+ #include "cutlass/util/reference/host/gemm.h"
+
+ #include "testbed.h"
+
+ #if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED)
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ TEST(SM75_Device_Gemm_f16t_f16n_f16t_tensor_op_f16_sliced_k, 64x64x64_64x32x32) {
+
+ using ElementOutput = cutlass::half_t;
+ using ElementAccumulator = cutlass::half_t;
+
+ using Gemm = cutlass::gemm::device::Gemm<
+ cutlass::half_t,
+ cutlass::layout::RowMajor,
+ cutlass::half_t,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm75,
+ cutlass::gemm::GemmShape<64, 64, 64>,
+ cutlass::gemm::GemmShape<64, 32, 32>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 64 / cutlass::sizeof_bits<ElementOutput>::value,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 2
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ #endif // if (CUTLASS_ARCH_MMA_SM75_SUPPORTED)
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/gemm_universal_s8t_f16n_f32t_mixed_input_tensor_op_f32_sm80.cu ADDED
@@ -0,0 +1,97 @@
+ /***************************************************************************************************
+ * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+ /*! \file
+ \brief Tests for device-wide GEMM interface
+
+ */
+
+ #include <iostream>
+
+ #include "../../common/cutlass_unit_test.h"
+ #include "cutlass/cutlass.h"
+
+ #include "cutlass/gemm/device/gemm_universal.h"
+
+ #include "cutlass/util/host_tensor.h"
+ #include "cutlass/util/reference/host/gemm.h"
+ #include "cutlass/util/reference/host/tensor_compare.h"
+ #include "cutlass/util/reference/host/tensor_copy.h"
+ #include "cutlass/util/reference/host/tensor_fill.h"
+ #include "cutlass/util/tensor_view_io.h"
+
+ #include "testbed_universal.h"
+
+ ////////////////////////////////////////////////////////////////////////////////
+
+ #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
+
+ ////////////////////////////////////////////////////////////////////////////////
+
+
+ TEST(SM80_Device_GemmUniversal_s8t_f16n_f32t_mixed_input_tensor_op_f32, 128x128x64_64x64x64) {
+
+ using ElementA = int8_t;
+ using ElementB = cutlass::half_t;
+ using ElementOutput = float;
+ using ElementAccumulator = float;
+
+ using Gemm = cutlass::gemm::device::GemmUniversal<
+ ElementA,
+ cutlass::layout::RowMajor,
+ ElementB,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::RowMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm80,
+ cutlass::gemm::GemmShape<128, 128, 64>,
+ cutlass::gemm::GemmShape<64, 64, 64>,
+ cutlass::gemm::GemmShape<16, 8, 16>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value,
+ ElementAccumulator, ElementAccumulator>,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 4, // Stages
+ 16, // AlignmentA
+ 8, // AlignmentB
+ cutlass::arch::OpMultiplyAddMixedInputUpcast,
+ cutlass::ComplexTransform::kNone,
+ cutlass::ComplexTransform::kNone
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllGemmUniversal<Gemm>());
+ }
+ ////////////////////////////////////////////////////////////////////////////////
+
+ #endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
+
+ ////////////////////////////////////////////////////////////////////////////////
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/hemm_cf32h_cf32n_tensor_op_fast_f32_ls_sm80.cu ADDED
@@ -0,0 +1,175 @@
+ /***************************************************************************************************
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+ /*! \file
+ \brief Tests for device-wide HEMM interface
+
+
+ */
+
+ #include <iostream>
+
+ #include "../../common/cutlass_unit_test.h"
+ #include "cutlass/blas3.h"
+ #include "cutlass/gemm/device/symm.h"
+ #include "cutlass/util/host_tensor.h"
+ #include "cutlass/util/reference/host/symm_complex.h"
+ #include "cutlass/util/reference/host/tensor_compare.h"
+ #include "cutlass/util/reference/host/tensor_copy.h"
+ #include "cutlass/util/reference/host/tensor_fill.h"
+ #include "cutlass/util/tensor_view_io.h"
+
+ #include "testbed_symm_universal.h"
+
+ #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ TEST(SM80_Device_Hemm_cf32h_cf32n_ls_l_tensor_op_fast_f32, 32x32x16_16x16x16) {
+
+ using ElementOutput = cutlass::complex<float>;
+ using ElementAccumulator = cutlass::complex<float>;
+
+ using Hemm = cutlass::gemm::device::Symm<
+ cutlass::complex<float>,
+ cutlass::layout::ColumnMajor,
+ cutlass::SideMode::kLeft,
+ cutlass::FillMode::kLower,
+ cutlass::complex<float>,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::ColumnMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm80,
+ cutlass::gemm::GemmShape<32, 32, 16>,
+ cutlass::gemm::GemmShape<16, 16, 16>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 1,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 3,
+ 1,
+ 1,
+ false,
+ cutlass::arch::OpMultiplyAddComplexFastF32,
+ cutlass::BlasMode::kHermitian
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal<Hemm>());
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ TEST(SM80_Device_Hemm_cf32h_cf32n_ls_u_tensor_op_fast_f32, 32x32x16_16x16x16) {
+
+ using ElementOutput = cutlass::complex<float>;
+ using ElementAccumulator = cutlass::complex<float>;
+
+ using Hemm = cutlass::gemm::device::Symm<
+ cutlass::complex<float>,
+ cutlass::layout::ColumnMajor,
+ cutlass::SideMode::kLeft,
+ cutlass::FillMode::kUpper,
+ cutlass::complex<float>,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::ColumnMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm80,
+ cutlass::gemm::GemmShape<32, 32, 16>,
+ cutlass::gemm::GemmShape<16, 16, 16>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 1,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 3,
+ 1,
+ 1,
+ false,
+ cutlass::arch::OpMultiplyAddComplexFastF32,
+ cutlass::BlasMode::kHermitian
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal<Hemm>());
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ TEST(SM80_Device_Hemm_cf32h_cf32n_ls_u_tensor_op_fast_f32, 64x64x16_32x32x16) {
+
+ using ElementOutput = cutlass::complex<float>;
+ using ElementAccumulator = cutlass::complex<float>;
+
+ using Hemm = cutlass::gemm::device::Symm<
+ cutlass::complex<float>,
+ cutlass::layout::ColumnMajor,
+ cutlass::SideMode::kLeft,
+ cutlass::FillMode::kUpper,
+ cutlass::complex<float>,
+ cutlass::layout::ColumnMajor,
+ ElementOutput,
+ cutlass::layout::ColumnMajor,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm80,
+ cutlass::gemm::GemmShape<64, 64, 16>,
+ cutlass::gemm::GemmShape<32, 32, 16>,
+ cutlass::gemm::GemmShape<16, 8, 8>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementOutput,
+ 1,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 3,
+ 1,
+ 1,
+ false,
+ cutlass::arch::OpMultiplyAddComplexFastF32,
+ cutlass::BlasMode::kHermitian
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllSymmUniversal<Hemm>());
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ #endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
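Added note for orientation (standard BLAS definition, not from the diff): the Symm device kernel instantiated with cutlass::BlasMode::kHermitian above implements HEMM. With SideMode::kLeft, the computation is

    \[ C \leftarrow \alpha\, A\, B + \beta\, C, \]

where A is an m-by-m Hermitian matrix of which only the lower (FillMode::kLower) or upper (FillMode::kUpper) triangle is referenced, which are exactly the two fill modes exercised by the tests in this file.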
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/herk_cf64h_cf64n_tensor_op_f64_sm80.cu ADDED
@@ -0,0 +1,175 @@
+ /***************************************************************************************************
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+ /*! \file
+ \brief Tests for device-wide HERK interface
+ */
+
+ #include <iostream>
+
+ #include "../../common/cutlass_unit_test.h"
+ #include "cutlass/blas3.h"
+ #include "cutlass/gemm/device/rank_k.h"
+ #include "cutlass/util/host_tensor.h"
+ #include "cutlass/util/reference/host/rank_k_complex.h"
+ #include "cutlass/util/reference/host/tensor_compare.h"
+ #include "cutlass/util/reference/host/tensor_copy.h"
+ #include "cutlass/util/reference/host/tensor_fill.h"
+ #include "cutlass/util/tensor_view_io.h"
+
+ #include "testbed_rank_k_universal.h"
+
+ #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+ // HERK operator on CUBLAS_OP_N (column-major) input layouts
+ TEST(SM80_Device_Herk_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) {
+
+ using ElementA = cutlass::complex<double>;
+ using LayoutA = cutlass::layout::ColumnMajor;
+
+ using ElementC = cutlass::complex<double>;
+ using LayoutC = cutlass::layout::ColumnMajor;
+ using ElementAccumulator = cutlass::complex<double>;
+
+ using RankK = cutlass::gemm::device::RankK<
+ ElementA,
+ LayoutA,
+ ElementC,
+ LayoutC,
+ cutlass::FillMode::kLower,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm80,
+ cutlass::gemm::GemmShape<32, 32, 16>,
+ cutlass::gemm::GemmShape<16, 16, 16>,
+ cutlass::gemm::GemmShape<8, 8, 4>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementC,
+ 1,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 4, // kStages
+ 1, // AlignmentA
+ false, // SplitKSerial
+ cutlass::arch::OpMultiplyAddComplex,
+ cutlass::ComplexTransform::kNone,
+ cutlass::BlasMode::kHermitian
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+ // HERK operator on CUBLAS_OP_N (column-major) input layouts
+ TEST(SM80_Device_Herk_cf64n_cf64n_u_tensor_op_f64, 32x32x16_16x16x16) {
+
+ using ElementA = cutlass::complex<double>;
+ using LayoutA = cutlass::layout::ColumnMajor;
+
+ using ElementC = cutlass::complex<double>;
+ using LayoutC = cutlass::layout::ColumnMajor;
+ using ElementAccumulator = cutlass::complex<double>;
+
+ using RankK = cutlass::gemm::device::RankK<
+ ElementA,
+ LayoutA,
+ ElementC,
+ LayoutC,
+ cutlass::FillMode::kUpper,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm80,
+ cutlass::gemm::GemmShape<32, 32, 16>,
+ cutlass::gemm::GemmShape<16, 16, 16>,
+ cutlass::gemm::GemmShape<8, 8, 4>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementC,
+ 1,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 4, // kStages
+ 1, // AlignmentA
+ false, // SplitKSerial
+ cutlass::arch::OpMultiplyAddComplex,
+ cutlass::ComplexTransform::kNone,
+ cutlass::BlasMode::kHermitian
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+ // HERK operator on CUBLAS_OP_C (row-major + conj) input layouts
+ TEST(SM80_Device_Herk_cf64h_cf64n_l_tensor_op_f64, 64x64x16_32x32x16) {
+
+ using ElementA = cutlass::complex<double>;
+ using LayoutA = cutlass::layout::RowMajor;
+
+ using ElementC = cutlass::complex<double>;
+ using LayoutC = cutlass::layout::ColumnMajor;
+ using ElementAccumulator = cutlass::complex<double>;
+
+ using RankK = cutlass::gemm::device::RankK<
+ ElementA,
+ LayoutA,
+ ElementC,
+ LayoutC,
+ cutlass::FillMode::kLower,
+ ElementAccumulator,
+ cutlass::arch::OpClassTensorOp,
+ cutlass::arch::Sm80,
+ cutlass::gemm::GemmShape<32, 32, 16>,
+ cutlass::gemm::GemmShape<16, 16, 16>,
+ cutlass::gemm::GemmShape<8, 8, 4>,
+ cutlass::epilogue::thread::LinearCombination<
+ ElementC,
+ 1,
+ ElementAccumulator,
+ ElementAccumulator
+ >,
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
+ 4, // kStages
+ 1, // AlignmentA
+ false, // SplitKSerial
+ cutlass::arch::OpMultiplyAddComplex,
+ cutlass::ComplexTransform::kConjugate,
+ cutlass::BlasMode::kHermitian
+ >;
+
+ EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
+ }
+ /////////////////////////////////////////////////////////////////////////////////////////////////
+
+ #endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
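Added note for orientation (standard BLAS definition, not from the diff): RankK with BlasMode::kHermitian implements HERK, the Hermitian rank-k update

    \[ C \leftarrow \alpha\, A\, A^{H} + \beta\, C, \]

where C is Hermitian and only the triangle selected by FillMode is updated. The third test applies ComplexTransform::kConjugate to a row-major A, which corresponds to the CUBLAS_OP_C operand noted in the code comments above.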
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cluster_warpspecialized_cooperative_dag.cu ADDED
@@ -0,0 +1,170 @@
+ /***************************************************************************************************
+ * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+ /*! \file
+ \brief Tests for Sm90 f16_f16_f16 cooperative DAG epilogue
+ EVTDAG: D = beta * C + Graph(relu(alpha * acc + aux) + aux)
+ DAGEVT: EVT = alpha * acc + C, D = Graph(maximum(EVT + per-row bias, EVT))
+ */
+
+ #include <iostream>
+
+ #include "cutlass/cutlass.h"
+ #include "cute/tensor.hpp"
+ #include "cute/atom/mma_atom.hpp"
+
+ #include "cutlass/numeric_types.h"
+
+ #include "cutlass/gemm/device/gemm_universal_adapter.h"
+ #include "cutlass/gemm/kernel/gemm_universal.hpp"
+ #include "cutlass/epilogue/collective/collective_builder.hpp"
+ #include "cutlass/gemm/collective/collective_builder.hpp"
+ #include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp"
+ #include "cutlass/epilogue/collective/default_epilogue.hpp"
+ #include "cutlass/epilogue/thread/linear_combination.h"
+ #include "cutlass/epilogue/thread/linear_combination_bias_elementwise.h"
+
+ #include "../../common/cutlass_unit_test.h"
+
+ #include "gemm_testbed_3x_evt.hpp"
+ #include "sm90_evt_operations.hpp"
+
+
+ #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
+
+ using namespace cute;
+
+
+ TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 256x128x64_2x2x1_EVTDAG) {
+ using LayoutA = cutlass::layout::RowMajor;
+ using LayoutB = cutlass::layout::ColumnMajor;
+ using LayoutC = cutlass::layout::RowMajor;
+ using TileShape_MNK = Shape<_256,_128,_64>;
+ using ClusterShape_MNK = Shape<_2,_2,_1>;
+
+ using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
+ using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
+
+ using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
+ TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>;
+
+ using AuxLoadDescriptor = cutlass::epilogue::collective::detail::AuxLoadDescriptor<
+ EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t>;
+
+ using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombEVTDAG<
+ EpilogueDescriptor, AuxLoadDescriptor, cutlass::half_t, float, float>;
+
+ using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ TileShape_MNK, ClusterShape_MNK,
+ EpilogueTileType,
+ float, float,
+ cutlass::half_t, LayoutC, 8,
+ cutlass::half_t, LayoutC, 8,
+ EpilogueSchedule,
+ FusionCallbacks
+ >::CollectiveOp;
+
+ using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ cutlass::half_t, LayoutA, 8,
+ cutlass::half_t, LayoutB, 8,
+ float,
+ TileShape_MNK, ClusterShape_MNK,
+ cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
+ cutlass::gemm::KernelTmaWarpSpecializedCooperative
+ >::CollectiveOp;
+
+ using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
+ Shape<int,int,int,int>,
+ CollectiveMainloop,
+ CollectiveEpilogue
+ >;
+
+ using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
+
+ // Host reference
+ bool passed = test::gemm::device::TestAllEVT<Gemm, test::gemm::device::HostEVTDAG<Gemm>>();
+ EXPECT_TRUE(passed);
+ }
+
+ TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_epilogue, 128x128x64_2x2x1_DAGEVT) {
+ using LayoutA = cutlass::layout::RowMajor;
+ using LayoutB = cutlass::layout::ColumnMajor;
+ using LayoutC = cutlass::layout::RowMajor;
+ using TileShape_MNK = Shape<_256,_128,_64>;
+ using ClusterShape_MNK = Shape<_2,_2,_1>;
+
+ using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative;
+ using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto;
+
+ using EpilogueDescriptor = cutlass::epilogue::collective::detail::EpilogueDescriptor<
+ TileShape_MNK, EpilogueTileType, cutlass::half_t, cutlass::half_t, EpilogueSchedule>;
+
+ using AuxStoreDescriptor = cutlass::epilogue::collective::detail::AuxStoreDescriptor<
+ EpilogueDescriptor, cutlass::layout::RowMajor, cutlass::half_t>;
+
+ using FusionCallbacks = cutlass::epilogue::fusion::Sm90LinCombDAGEVT<
+ EpilogueDescriptor, AuxStoreDescriptor, cutlass::half_t, float, cutlass::half_t, float>;
+
+ using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ TileShape_MNK, ClusterShape_MNK,
+ EpilogueTileType,
+ float, float,
+ cutlass::half_t, LayoutC, 8,
+ cutlass::half_t, LayoutC, 8,
+ EpilogueSchedule,
+ FusionCallbacks
+ >::CollectiveOp;
+
+ using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ cutlass::half_t, LayoutA, 8,
+ cutlass::half_t, LayoutB, 8,
+ float,
+ TileShape_MNK, ClusterShape_MNK,
+ cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
+ cutlass::gemm::KernelTmaWarpSpecializedCooperative
+ >::CollectiveOp;
+
+ using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
+ Shape<int,int,int,int>,
+ CollectiveMainloop,
+ CollectiveEpilogue
+ >;
+
+ using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
+
+ // Host reference
+ bool passed = test::gemm::device::TestAllEVT<Gemm, test::gemm::device::HostDAGEVT<Gemm>>();
+ EXPECT_TRUE(passed);
+ }
+ #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
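For readers skimming the diff, the two fusion graphs named in the \brief comment of this file expand to the following element-wise epilogues (acc is the accumulator tile, C the epilogue source, aux the auxiliary tensor, bias a per-row vector); this is only a restatement of the EVTDAG/DAGEVT lines above:

    \[ \text{EVTDAG:}\quad D = \beta\, C + \bigl(\operatorname{relu}(\alpha\,\text{acc} + \text{aux}) + \text{aux}\bigr) \]
    \[ \text{DAGEVT:}\quad T = \alpha\,\text{acc} + C, \qquad D = \max(T + \text{bias},\ T) \]

The host-side references HostEVTDAG and HostDAGEVT used in the EXPECT_TRUE checks are what the device results are verified against.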
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/sm90_gemm_tf32_tf32_f32_alignx_tensor_op_f32_warpspecialized.cu ADDED
@@ -0,0 +1,167 @@
+ /***************************************************************************************************
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+ /*! \file
+ \brief Tests for device-wide GEMM interface
+ */
+
+ #include <iostream>
+
+ #include "cutlass/cutlass.h"
+ #include "cute/tensor.hpp"
+ #include "cute/atom/mma_atom.hpp"
+
+ #include "cutlass/numeric_types.h"
+
+ #include "cutlass/gemm/device/gemm_universal_adapter.h"
+ #include "cutlass/gemm/kernel/gemm_universal.hpp"
+ #include "cutlass/gemm/collective/collective_builder.hpp"
+ #include "cutlass/epilogue/collective/collective_builder.hpp"
+ #include "cutlass/epilogue/collective/default_epilogue.hpp"
+ #include "cutlass/epilogue/thread/linear_combination.h"
+
+ #include "../../common/cutlass_unit_test.h"
+
+ #include "gemm_testbed_3x.hpp"
+
+ #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
+
+ using namespace cute;
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_align4_tensor_op_gmma_f32_warpspecialized, 128x64x32) {
+ using LayoutA = cutlass::layout::RowMajor;
+ using LayoutB = cutlass::layout::ColumnMajor;
+ using LayoutC = cutlass::layout::ColumnMajor;
+
+ using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ tfloat32_t, LayoutA, 4,
+ tfloat32_t, LayoutB, 4,
+ float,
+ Shape<_128,_64,_32>, Shape<_1,_1,_1>,
+ cutlass::gemm::collective::StageCountAuto,
+ cutlass::gemm::KernelCpAsyncWarpSpecialized
+ >::CollectiveOp;
+
+ using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ Shape<_128,_64,_32>, Shape<_1,_1,_1>,
+ cutlass::epilogue::collective::EpilogueTileAuto,
+ float, float,
+ float, LayoutC, 4,
+ float, LayoutC, 4,
+ cutlass::epilogue::NoSmemWarpSpecialized
+ >::CollectiveOp;
+
+ using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
+ Shape<int,int,int,int>,
+ CollectiveOp,
+ CollectiveEpilogue
+ >;
+
+ using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
+ EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
+ }
+
+ TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_align2_tensor_op_gmma_f32_warpspecialized, 128x64x32) {
+ using LayoutA = cutlass::layout::RowMajor;
+ using LayoutB = cutlass::layout::ColumnMajor;
+ using LayoutC = cutlass::layout::ColumnMajor;
+
+ using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ cutlass::tfloat32_t, LayoutA, 2,
+ cutlass::tfloat32_t, LayoutB, 2,
+ float,
+ Shape<_128,_64,_32>, Shape<_1,_1,_1>,
+ cutlass::gemm::collective::StageCountAuto,
+ cutlass::gemm::KernelCpAsyncWarpSpecialized
+ >::CollectiveOp;
+
+ using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ Shape<_128,_64,_32>, Shape<_1,_1,_1>,
+ cutlass::epilogue::collective::EpilogueTileAuto,
+ float, float,
+ float, LayoutC, 2,
+ float, LayoutC, 2,
+ cutlass::epilogue::collective::EpilogueScheduleAuto
+ >::CollectiveOp;
+
+ using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
+ Shape<int,int,int,int>,
+ CollectiveOp,
+ CollectiveEpilogue
+ >;
+
+ using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
+ EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
+ }
+
+ TEST(SM90_Device_Gemm_tf32t_tf32n_f32n_align1_tensor_op_gmma_f32_warpspecialized, 128x64x32) {
+ using LayoutA = cutlass::layout::RowMajor;
+ using LayoutB = cutlass::layout::ColumnMajor;
+ using LayoutC = cutlass::layout::ColumnMajor;
+
+ using CollectiveOp = typename cutlass::gemm::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ cutlass::tfloat32_t, LayoutA, 1,
+ cutlass::tfloat32_t, LayoutB, 1,
+ float,
+ Shape<_128,_64,_32>, Shape<_1,_1,_1>,
+ cutlass::gemm::collective::StageCountAuto,
+ cutlass::gemm::KernelCpAsyncWarpSpecialized
+ >::CollectiveOp;
+
+ using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
+ cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
+ Shape<_128,_64,_32>, Shape<_1,_1,_1>,
+ cutlass::epilogue::collective::EpilogueTileAuto,
+ float, float,
+ float, LayoutC, 1,
+ float, LayoutC, 1,
+ cutlass::epilogue::collective::EpilogueScheduleAuto
+ >::CollectiveOp;
+
+ using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
+ Shape<int,int,int,int>,
+ CollectiveOp,
+ CollectiveEpilogue
+ >;
+
+ using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
+ EXPECT_TRUE(test::gemm::device::TestAll<Gemm>());
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+
+ #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/syr2k_cf64t_cf64n_tensor_op_f64_grouped_sm80.cu ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Tests for grouped Rank2K interface
33
+ */
34
+
35
+ #include <iostream>
36
+
37
+ #include "../../common/cutlass_unit_test.h"
38
+ #include "cutlass/cutlass.h"
39
+
40
+ #include "cutlass/blas3.h"
41
+ #include "cutlass/gemm/gemm.h"
42
+ #include "cutlass/gemm/kernel/rank_2k_grouped.h"
43
+ #include "cutlass/gemm/kernel/default_rank_2k_grouped.h"
44
+ #include "cutlass/gemm/device/rank_2k_grouped.h"
45
+
46
+ #include "cutlass/util/host_tensor.h"
47
+ #include "cutlass/util/reference/host/gemm.h"
48
+ #include "cutlass/util/reference/host/tensor_compare.h"
49
+ #include "cutlass/util/reference/host/tensor_copy.h"
50
+ #include "cutlass/util/reference/host/tensor_fill.h"
51
+ #include "cutlass/util/tensor_view_io.h"
52
+
53
+ #include "testbed_grouped_rank_2k.h"
54
+
55
+ /////////////////////////////////////////////////////////////////////////////////////////////////
56
+
57
+ #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
58
+
59
+ /////////////////////////////////////////////////////////////////////////////////////////////////
60
+
61
+ TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_l_tensor_op_f64, 32x32x16_16x16x16) {
62
+
63
+ using ElementA = cutlass::complex<double>;
64
+ using LayoutA = cutlass::layout::RowMajor;
65
+ using ElementB = cutlass::complex<double>;
66
+ using LayoutB = cutlass::layout::RowMajor;
67
+ using ElementC = cutlass::complex<double>;
68
+ using LayoutC = cutlass::layout::ColumnMajor;
69
+ using ElementAccumulator = cutlass::complex<double>;
70
+
71
+ using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped<
72
+ ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1,
73
+ ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1,
74
+ ElementC, LayoutC, cutlass::FillMode::kLower,
75
+ ElementAccumulator,
76
+ cutlass::arch::OpClassTensorOp,
77
+ cutlass::arch::Sm80,
78
+ cutlass::gemm::GemmShape<32, 32, 16>,
79
+ cutlass::gemm::GemmShape<16, 16, 16>,
80
+ cutlass::gemm::GemmShape<8, 8, 4>,
81
+ cutlass::epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>,
82
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
83
+ 3, // kStages
84
+ cutlass::arch::OpMultiplyAddComplex,
85
+ cutlass::BlasMode::kSymmetric>::Rank2Kkernel;
86
+
87
+ using Rank2K = cutlass::gemm::device::Rank2KGrouped<Rank2Kkernel>;
88
+
89
+ test::gemm::device::TestbedGrouped<Rank2K> testbed;
90
+ bool passed = testbed.run(24);
91
+ EXPECT_TRUE(passed);
92
+ }
93
+
94
+ /////////////////////////////////////////////////////////////////////////////////////////////////
95
+
96
+ TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_l_tensor_op_f64, 64x64x16_32x32x16) {
97
+
98
+ using ElementA = cutlass::complex<double>;
99
+ using LayoutA = cutlass::layout::RowMajor;
100
+ using ElementB = cutlass::complex<double>;
101
+ using LayoutB = cutlass::layout::RowMajor;
102
+ using ElementC = cutlass::complex<double>;
103
+ using LayoutC = cutlass::layout::ColumnMajor;
104
+ using ElementAccumulator = cutlass::complex<double>;
105
+
106
+ using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped<
107
+ ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1,
108
+ ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1,
109
+ ElementC, LayoutC, cutlass::FillMode::kLower,
110
+ ElementAccumulator,
111
+ cutlass::arch::OpClassTensorOp,
112
+ cutlass::arch::Sm80,
113
+ cutlass::gemm::GemmShape<64, 64, 16>,
114
+ cutlass::gemm::GemmShape<32, 32, 16>,
115
+ cutlass::gemm::GemmShape<8, 8, 4>,
116
+ cutlass::epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>,
117
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
118
+ 3, // kStages
119
+ cutlass::arch::OpMultiplyAddComplex,
120
+ cutlass::BlasMode::kSymmetric>::Rank2Kkernel;
121
+
122
+ using Rank2K = cutlass::gemm::device::Rank2KGrouped<Rank2Kkernel>;
123
+
124
+ test::gemm::device::TestbedGrouped<Rank2K> testbed;
125
+ bool passed = testbed.run(24);
126
+ EXPECT_TRUE(passed);
127
+ }
128
+
129
+ /////////////////////////////////////////////////////////////////////////////////////////////////
130
+
131
+ TEST(SM80_Device_Syr2kGrouped_cf64n_cf64n_u_tensor_op_f64, 32x32x16_16x16x16) {
132
+
133
+ using ElementA = cutlass::complex<double>;
134
+ using LayoutA = cutlass::layout::RowMajor;
135
+ using ElementB = cutlass::complex<double>;
136
+ using LayoutB = cutlass::layout::RowMajor;
137
+ using ElementC = cutlass::complex<double>;
138
+ using LayoutC = cutlass::layout::ColumnMajor;
139
+ using ElementAccumulator = cutlass::complex<double>;
140
+
141
+ using Rank2Kkernel = typename cutlass::gemm::kernel::DefaultRank2KGrouped<
142
+ ElementA, LayoutA, cutlass::ComplexTransform::kNone, 1,
143
+ ElementB, LayoutB, cutlass::ComplexTransform::kNone, 1,
144
+ ElementC, LayoutC, cutlass::FillMode::kUpper,
145
+ ElementAccumulator,
146
+ cutlass::arch::OpClassTensorOp,
147
+ cutlass::arch::Sm80,
148
+ cutlass::gemm::GemmShape<32, 32, 16>,
149
+ cutlass::gemm::GemmShape<16, 16, 16>,
150
+ cutlass::gemm::GemmShape<8, 8, 4>,
151
+ cutlass::epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>,
152
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
153
+ 3, // kStages
154
+ cutlass::arch::OpMultiplyAddComplex,
155
+ cutlass::BlasMode::kSymmetric>::Rank2Kkernel;
156
+
157
+ using Rank2K = cutlass::gemm::device::Rank2KGrouped<Rank2Kkernel>;
158
+
159
+ test::gemm::device::TestbedGrouped<Rank2K> testbed;
160
+ bool passed = testbed.run(24);
161
+ EXPECT_TRUE(passed);
162
+ }
163
+
164
+ /////////////////////////////////////////////////////////////////////////////////////////////////
165
+
166
+ #endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
167
+
168
+ /////////////////////////////////////////////////////////////////////////////////////////////////
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/trmm_f64_f64_f64_tensor_op_f64_sm90.cu ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Tests for device-wide TRMM interface
33
+
34
+
35
+ */
36
+
37
+ #include <iostream>
38
+
39
+ #include "../../common/cutlass_unit_test.h"
40
+ #include "cutlass/blas3.h"
41
+ #include "cutlass/gemm/device/trmm.h"
42
+ #include "cutlass/util/host_tensor.h"
43
+ #include "cutlass/util/reference/host/trmm.h"
44
+ #include "cutlass/util/reference/host/tensor_compare.h"
45
+ #include "cutlass/util/reference/host/tensor_copy.h"
46
+ #include "cutlass/util/reference/host/tensor_fill.h"
47
+ #include "cutlass/util/tensor_view_io.h"
48
+
49
+ #include "testbed_trmm_universal.h"
50
+
51
+ #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED)
52
+ /////////////////////////////////////////////////////////////////////////////////////////////////
53
+
54
+ TEST(SM90_Device_Trmm_f64n_f64n_f64t_rs_l_nu_tensor_op_f64, 32x32x16_16x16x16) {
55
+
56
+ using ElementOutput = double;
57
+ using ElementAccumulator = double;
58
+
59
+ using Trmm = cutlass::gemm::device::Trmm<
60
+ double,
61
+ cutlass::layout::ColumnMajor,
62
+ cutlass::SideMode::kRight,
63
+ cutlass::FillMode::kLower,
64
+ cutlass::DiagType::kNonUnit,
65
+ double,
66
+ cutlass::layout::ColumnMajor,
67
+ ElementOutput,
68
+ cutlass::layout::RowMajor,
69
+ ElementAccumulator,
70
+ cutlass::arch::OpClassTensorOp,
71
+ cutlass::arch::Sm90,
72
+ cutlass::gemm::GemmShape<32, 32, 16>,
73
+ cutlass::gemm::GemmShape<16, 16, 16>,
74
+ cutlass::gemm::GemmShape<16, 8, 4>,
75
+ cutlass::epilogue::thread::LinearCombination<
76
+ ElementOutput,
77
+ 1,
78
+ ElementAccumulator,
79
+ ElementAccumulator
80
+ >,
81
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
82
+ 4
83
+ >;
84
+
85
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
86
+ }
87
+
88
+ /////////////////////////////////////////////////////////////////////////////////////////////////
89
+
90
+ TEST(SM90_Device_Trmm_f64t_f64t_f64n_rs_l_nu_tensor_op_f64, 64x64x16_32x32x16) {
91
+
92
+ using ElementOutput = double;
93
+ using ElementAccumulator = double;
94
+
95
+ using Trmm = cutlass::gemm::device::Trmm<
96
+ double,
97
+ cutlass::layout::RowMajor,
98
+ cutlass::SideMode::kRight,
99
+ cutlass::FillMode::kLower,
100
+ cutlass::DiagType::kNonUnit,
101
+ double,
102
+ cutlass::layout::RowMajor,
103
+ ElementOutput,
104
+ cutlass::layout::ColumnMajor,
105
+ ElementAccumulator,
106
+ cutlass::arch::OpClassTensorOp,
107
+ cutlass::arch::Sm90,
108
+ cutlass::gemm::GemmShape<64, 64, 16>,
109
+ cutlass::gemm::GemmShape<32, 32, 16>,
110
+ cutlass::gemm::GemmShape<16, 8, 4>,
111
+ cutlass::epilogue::thread::LinearCombination<
112
+ ElementOutput,
113
+ 1,
114
+ ElementAccumulator,
115
+ ElementAccumulator
116
+ >,
117
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
118
+ 4
119
+ >;
120
+
121
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
122
+ }
123
+
124
+ /////////////////////////////////////////////////////////////////////////////////////////////////
125
+
126
+ #endif // #if defined(CUTLASS_ARCH_MMA_SM90_F64_MMA_ENABLED)
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/device/trmm_f64n_f64n_f64t_tensor_op_f64_ls_sm80.cu ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Tests for device-wide TRMM interface
33
+
34
+
35
+ */
36
+
37
+ #include <iostream>
38
+
39
+ #include "../../common/cutlass_unit_test.h"
40
+ #include "cutlass/blas3.h"
41
+ #include "cutlass/gemm/device/trmm.h"
42
+ #include "cutlass/util/host_tensor.h"
43
+ #include "cutlass/util/reference/host/trmm.h"
44
+ #include "cutlass/util/reference/host/tensor_compare.h"
45
+ #include "cutlass/util/reference/host/tensor_copy.h"
46
+ #include "cutlass/util/reference/host/tensor_fill.h"
47
+ #include "cutlass/util/tensor_view_io.h"
48
+
49
+ #include "testbed_trmm_universal.h"
50
+
51
+ #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
52
+
53
+ /////////////////////////////////////////////////////////////////////////////////////////////////
54
+
55
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 32x32x16_16x16x16) {
56
+
57
+ using ElementOutput = double;
58
+ using ElementAccumulator = double;
59
+
60
+ using Trmm = cutlass::gemm::device::Trmm<
61
+ double,
62
+ cutlass::layout::ColumnMajor,
63
+ cutlass::SideMode::kLeft,
64
+ cutlass::FillMode::kLower,
65
+ cutlass::DiagType::kNonUnit,
66
+ double,
67
+ cutlass::layout::ColumnMajor,
68
+ ElementOutput,
69
+ cutlass::layout::RowMajor,
70
+ ElementAccumulator,
71
+ cutlass::arch::OpClassTensorOp,
72
+ cutlass::arch::Sm80,
73
+ cutlass::gemm::GemmShape<32, 32, 16>,
74
+ cutlass::gemm::GemmShape<16, 16, 16>,
75
+ cutlass::gemm::GemmShape<8, 8, 4>,
76
+ cutlass::epilogue::thread::LinearCombination<
77
+ ElementOutput,
78
+ 1,
79
+ ElementAccumulator,
80
+ ElementAccumulator
81
+ >,
82
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
83
+ 4
84
+ >;
85
+
86
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
87
+ }
88
+
89
+ /////////////////////////////////////////////////////////////////////////////////////////////////
90
+
91
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 64x64x16_32x32x16) {
92
+
93
+ using ElementOutput = double;
94
+ using ElementAccumulator = double;
95
+
96
+ using Trmm = cutlass::gemm::device::Trmm<
97
+ double,
98
+ cutlass::layout::ColumnMajor,
99
+ cutlass::SideMode::kLeft,
100
+ cutlass::FillMode::kLower,
101
+ cutlass::DiagType::kNonUnit,
102
+ double,
103
+ cutlass::layout::ColumnMajor,
104
+ ElementOutput,
105
+ cutlass::layout::RowMajor,
106
+ ElementAccumulator,
107
+ cutlass::arch::OpClassTensorOp,
108
+ cutlass::arch::Sm80,
109
+ cutlass::gemm::GemmShape<64, 64, 16>,
110
+ cutlass::gemm::GemmShape<32, 32, 16>,
111
+ cutlass::gemm::GemmShape<8, 8, 4>,
112
+ cutlass::epilogue::thread::LinearCombination<
113
+ ElementOutput,
114
+ 1,
115
+ ElementAccumulator,
116
+ ElementAccumulator
117
+ >,
118
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
119
+ 4
120
+ >;
121
+
122
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
123
+ }
124
+
125
+ /////////////////////////////////////////////////////////////////////////////////////////////////
126
+
127
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 128x64x16_64x32x16) {
128
+
129
+ using ElementOutput = double;
130
+ using ElementAccumulator = double;
131
+
132
+ using Trmm = cutlass::gemm::device::Trmm<
133
+ double,
134
+ cutlass::layout::ColumnMajor,
135
+ cutlass::SideMode::kLeft,
136
+ cutlass::FillMode::kLower,
137
+ cutlass::DiagType::kNonUnit,
138
+ double,
139
+ cutlass::layout::ColumnMajor,
140
+ ElementOutput,
141
+ cutlass::layout::RowMajor,
142
+ ElementAccumulator,
143
+ cutlass::arch::OpClassTensorOp,
144
+ cutlass::arch::Sm80,
145
+ cutlass::gemm::GemmShape<128, 64, 16>,
146
+ cutlass::gemm::GemmShape<64, 32, 16>,
147
+ cutlass::gemm::GemmShape<8, 8, 4>,
148
+ cutlass::epilogue::thread::LinearCombination<
149
+ ElementOutput,
150
+ 1,
151
+ ElementAccumulator,
152
+ ElementAccumulator
153
+ >,
154
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
155
+ 4
156
+ >;
157
+
158
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
159
+ }
160
+
161
+ /////////////////////////////////////////////////////////////////////////////////////////////////
162
+
163
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 64x128x16_32x64x16) {
164
+
165
+ using ElementOutput = double;
166
+ using ElementAccumulator = double;
167
+
168
+ using Trmm = cutlass::gemm::device::Trmm<
169
+ double,
170
+ cutlass::layout::ColumnMajor,
171
+ cutlass::SideMode::kLeft,
172
+ cutlass::FillMode::kLower,
173
+ cutlass::DiagType::kNonUnit,
174
+ double,
175
+ cutlass::layout::ColumnMajor,
176
+ ElementOutput,
177
+ cutlass::layout::RowMajor,
178
+ ElementAccumulator,
179
+ cutlass::arch::OpClassTensorOp,
180
+ cutlass::arch::Sm80,
181
+ cutlass::gemm::GemmShape<64, 128, 16>,
182
+ cutlass::gemm::GemmShape<32, 64, 16>,
183
+ cutlass::gemm::GemmShape<8, 8, 4>,
184
+ cutlass::epilogue::thread::LinearCombination<
185
+ ElementOutput,
186
+ 1,
187
+ ElementAccumulator,
188
+ ElementAccumulator
189
+ >,
190
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
191
+ 3
192
+ >;
193
+
194
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
195
+ }
196
+
197
+ /////////////////////////////////////////////////////////////////////////////////////////////////
198
+
199
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_l_nu_tensor_op_f64, 128x128x16_32x64x16) {
200
+
201
+ using ElementOutput = double;
202
+ using ElementAccumulator = double;
203
+
204
+ using Trmm = cutlass::gemm::device::Trmm<
205
+ double,
206
+ cutlass::layout::ColumnMajor,
207
+ cutlass::SideMode::kLeft,
208
+ cutlass::FillMode::kLower,
209
+ cutlass::DiagType::kNonUnit,
210
+ double,
211
+ cutlass::layout::ColumnMajor,
212
+ ElementOutput,
213
+ cutlass::layout::RowMajor,
214
+ ElementAccumulator,
215
+ cutlass::arch::OpClassTensorOp,
216
+ cutlass::arch::Sm80,
217
+ cutlass::gemm::GemmShape<128, 128, 16>,
218
+ cutlass::gemm::GemmShape<32, 64, 16>,
219
+ cutlass::gemm::GemmShape<8, 8, 4>,
220
+ cutlass::epilogue::thread::LinearCombination<
221
+ ElementOutput,
222
+ 1,
223
+ ElementAccumulator,
224
+ ElementAccumulator
225
+ >,
226
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
227
+ 3
228
+ >;
229
+
230
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
231
+ }
232
+ /////////////////////////////////////////////////////////////////////////////////////////////////
233
+
234
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 32x32x16_16x16x16) {
235
+
236
+ using ElementOutput = double;
237
+ using ElementAccumulator = double;
238
+
239
+ using Trmm = cutlass::gemm::device::Trmm<
240
+ double,
241
+ cutlass::layout::ColumnMajor,
242
+ cutlass::SideMode::kLeft,
243
+ cutlass::FillMode::kUpper,
244
+ cutlass::DiagType::kNonUnit,
245
+ double,
246
+ cutlass::layout::ColumnMajor,
247
+ ElementOutput,
248
+ cutlass::layout::RowMajor,
249
+ ElementAccumulator,
250
+ cutlass::arch::OpClassTensorOp,
251
+ cutlass::arch::Sm80,
252
+ cutlass::gemm::GemmShape<32, 32, 16>,
253
+ cutlass::gemm::GemmShape<16, 16, 16>,
254
+ cutlass::gemm::GemmShape<8, 8, 4>,
255
+ cutlass::epilogue::thread::LinearCombination<
256
+ ElementOutput,
257
+ 1,
258
+ ElementAccumulator,
259
+ ElementAccumulator
260
+ >,
261
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
262
+ 4
263
+ >;
264
+
265
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
266
+ }
267
+
268
+ /////////////////////////////////////////////////////////////////////////////////////////////////
269
+
270
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 64x64x16_32x32x16) {
271
+
272
+ using ElementOutput = double;
273
+ using ElementAccumulator = double;
274
+
275
+ using Trmm = cutlass::gemm::device::Trmm<
276
+ double,
277
+ cutlass::layout::ColumnMajor,
278
+ cutlass::SideMode::kLeft,
279
+ cutlass::FillMode::kUpper,
280
+ cutlass::DiagType::kNonUnit,
281
+ double,
282
+ cutlass::layout::ColumnMajor,
283
+ ElementOutput,
284
+ cutlass::layout::RowMajor,
285
+ ElementAccumulator,
286
+ cutlass::arch::OpClassTensorOp,
287
+ cutlass::arch::Sm80,
288
+ cutlass::gemm::GemmShape<64, 64, 16>,
289
+ cutlass::gemm::GemmShape<32, 32, 16>,
290
+ cutlass::gemm::GemmShape<8, 8, 4>,
291
+ cutlass::epilogue::thread::LinearCombination<
292
+ ElementOutput,
293
+ 1,
294
+ ElementAccumulator,
295
+ ElementAccumulator
296
+ >,
297
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
298
+ 4
299
+ >;
300
+
301
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
302
+ }
303
+
304
+ /////////////////////////////////////////////////////////////////////////////////////////////////
305
+
306
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 128x64x16_64x32x16) {
307
+
308
+ using ElementOutput = double;
309
+ using ElementAccumulator = double;
310
+
311
+ using Trmm = cutlass::gemm::device::Trmm<
312
+ double,
313
+ cutlass::layout::ColumnMajor,
314
+ cutlass::SideMode::kLeft,
315
+ cutlass::FillMode::kUpper,
316
+ cutlass::DiagType::kNonUnit,
317
+ double,
318
+ cutlass::layout::ColumnMajor,
319
+ ElementOutput,
320
+ cutlass::layout::RowMajor,
321
+ ElementAccumulator,
322
+ cutlass::arch::OpClassTensorOp,
323
+ cutlass::arch::Sm80,
324
+ cutlass::gemm::GemmShape<128, 64, 16>,
325
+ cutlass::gemm::GemmShape<64, 32, 16>,
326
+ cutlass::gemm::GemmShape<8, 8, 4>,
327
+ cutlass::epilogue::thread::LinearCombination<
328
+ ElementOutput,
329
+ 1,
330
+ ElementAccumulator,
331
+ ElementAccumulator
332
+ >,
333
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
334
+ 4
335
+ >;
336
+
337
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
338
+ }
339
+
340
+ /////////////////////////////////////////////////////////////////////////////////////////////////
341
+
342
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 64x128x16_32x64x16) {
343
+
344
+ using ElementOutput = double;
345
+ using ElementAccumulator = double;
346
+
347
+ using Trmm = cutlass::gemm::device::Trmm<
348
+ double,
349
+ cutlass::layout::ColumnMajor,
350
+ cutlass::SideMode::kLeft,
351
+ cutlass::FillMode::kUpper,
352
+ cutlass::DiagType::kNonUnit,
353
+ double,
354
+ cutlass::layout::ColumnMajor,
355
+ ElementOutput,
356
+ cutlass::layout::RowMajor,
357
+ ElementAccumulator,
358
+ cutlass::arch::OpClassTensorOp,
359
+ cutlass::arch::Sm80,
360
+ cutlass::gemm::GemmShape<64, 128, 16>,
361
+ cutlass::gemm::GemmShape<32, 64, 16>,
362
+ cutlass::gemm::GemmShape<8, 8, 4>,
363
+ cutlass::epilogue::thread::LinearCombination<
364
+ ElementOutput,
365
+ 1,
366
+ ElementAccumulator,
367
+ ElementAccumulator
368
+ >,
369
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
370
+ 3
371
+ >;
372
+
373
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
374
+ }
375
+
376
+ /////////////////////////////////////////////////////////////////////////////////////////////////
377
+
378
+ TEST(SM80_Device_Trmm_f64n_f64n_f64t_ls_u_nu_tensor_op_f64, 128x128x16_32x64x16) {
379
+
380
+ using ElementOutput = double;
381
+ using ElementAccumulator = double;
382
+
383
+ using Trmm = cutlass::gemm::device::Trmm<
384
+ double,
385
+ cutlass::layout::ColumnMajor,
386
+ cutlass::SideMode::kLeft,
387
+ cutlass::FillMode::kUpper,
388
+ cutlass::DiagType::kNonUnit,
389
+ double,
390
+ cutlass::layout::ColumnMajor,
391
+ ElementOutput,
392
+ cutlass::layout::RowMajor,
393
+ ElementAccumulator,
394
+ cutlass::arch::OpClassTensorOp,
395
+ cutlass::arch::Sm80,
396
+ cutlass::gemm::GemmShape<128, 128, 16>,
397
+ cutlass::gemm::GemmShape<32, 64, 16>,
398
+ cutlass::gemm::GemmShape<8, 8, 4>,
399
+ cutlass::epilogue::thread::LinearCombination<
400
+ ElementOutput,
401
+ 1,
402
+ ElementAccumulator,
403
+ ElementAccumulator
404
+ >,
405
+ cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
406
+ 3
407
+ >;
408
+
409
+ EXPECT_TRUE(test::gemm::device::TestAllTrmmUniversal<Trmm>());
410
+ }
411
+
412
+ /////////////////////////////////////////////////////////////////////////////////////////////////
413
+
414
+ #endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/threadblock/mma_multistage_sparse_testbed.h ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Unit testbed for kernel-level GEMM
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include "../../common/cutlass_unit_test.h"
38
+ #include "cutlass/aligned_buffer.h"
39
+ #include "cutlass/array.h"
40
+ #include "cutlass/core_io.h"
41
+ #include "cutlass/gemm/gemm.h"
42
+ #include "cutlass/gemm/threadblock/default_mma_core_sparse_sm80.h"
43
+ #include "cutlass/layout/matrix.h"
44
+ #include "cutlass/numeric_types.h"
45
+ #include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
46
+ #include "cutlass/util/distribution.h"
47
+ #include "cutlass/util/host_tensor.h"
48
+ #include "cutlass/util/reference/host/gemm.h"
49
+ #include "cutlass/util/reference/host/tensor_compare.h"
50
+ #include "cutlass/util/reference/host/tensor_norm.h"
51
+ #include "cutlass/util/reference/host/tensor_fill.h"
52
+ #include "cutlass/util/tensor_view_io.h"
53
+ #include "cutlass/util/host_reorder.h"
54
+ #include "cutlass/util/host_uncompress.h"
55
+
56
+ namespace test {
57
+ namespace gemm {
58
+ namespace threadblock {
59
+
60
+ ////////////////////////////////////////////////////////////////////////////////
61
+
62
+ template <typename Mma>
63
+ __global__ void kernel_multistage_mma_sparse(cutlass::gemm::GemmCoord problem_size,
64
+ typename Mma::IteratorA::Params params_A,
65
+ typename Mma::IteratorA::TensorRef ref_A,
66
+ typename Mma::IteratorB::Params params_B,
67
+ typename Mma::IteratorB::TensorRef ref_B,
68
+ typename Mma::ElementC *ptr_C,
69
+ typename Mma::LayoutC::Stride::Index ldc,
70
+ typename Mma::IteratorE::Params params_E,
71
+ typename Mma::IteratorE::TensorRef ref_E) {
72
+ // Shared storage needed by threadblock-scoped matrix multiply-
73
+ // Dynamic shared memory base pointer
74
+ extern __shared__ int GemmSharedStorageBase[];
75
+
76
+ // Declare pointer to dynamic shared memory.
77
+ typename Mma::SharedStorage *shared_storage =
78
+ reinterpret_cast<typename Mma::SharedStorage *>(GemmSharedStorageBase);
79
+
80
+ // Compute threadblock location
81
+ cutlass::gemm::GemmCoord tb_tile_offset = {int(blockIdx.x), int(blockIdx.y),
82
+ 0};
83
+
84
+ cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * Mma::Shape::kM,
85
+ tb_tile_offset.k() / Mma::kSparse};
86
+
87
+ cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(),
88
+ tb_tile_offset.n() * Mma::Shape::kN};
89
+
90
+ cutlass::MatrixCoord tb_offset_E{tb_tile_offset.m() * Mma::Shape::kM,
91
+ tb_tile_offset.k() / Mma::kSparse};
92
+
93
+ // Compute position within threadblock
94
+ int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x;
95
+
96
+ // Construct iterators to A and B operands
97
+ typename Mma::IteratorA iterator_A(params_A, ref_A.data(),
98
+ {problem_size.m(), problem_size.k() / Mma::kSparse},
99
+ tb_thread_id, tb_offset_A);
100
+
101
+ typename Mma::IteratorB iterator_B(params_B, ref_B.data(),
102
+ {problem_size.k(), problem_size.n()},
103
+ tb_thread_id, tb_offset_B);
104
+
105
+ typename Mma::IteratorE iterator_E(
106
+ params_E, ref_E.data(),
107
+ {problem_size.m(),
108
+ problem_size.k() / Mma::kSparse / Mma::kElementsPerElementE},
109
+ tb_thread_id, tb_offset_E);
110
+
111
+ int warp_id = __shfl_sync(0xffffffff, threadIdx.y, 0);
112
+
113
+ // Construct thread-scoped matrix multiply
114
+ Mma mma(*shared_storage, tb_thread_id, warp_id, threadIdx.x);
115
+
116
+ typename Mma::FragmentC accum;
117
+
118
+ accum.clear();
119
+
120
+ int gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK;
121
+
122
+ // Compute threadblock-scoped matrix multiply-add
123
+ mma(gemm_k_iterations, accum, iterator_A, iterator_B, iterator_E, accum);
124
+
125
+ // Output results
126
+ typename Mma::Operator::IteratorC iterator_C({ptr_C, ldc}, threadIdx.x);
127
+
128
+ iterator_C.add_tile_offset(
129
+ {(tb_tile_offset.m() * Mma::WarpCount::kM) +
130
+ (warp_id % Mma::WarpCount::kM),
131
+ (tb_tile_offset.n() * Mma::WarpCount::kN) +
132
+ (warp_id / Mma::WarpCount::kM)});
133
+
134
+ iterator_C.store(accum);
135
+ }
136
+
137
+ ////////////////////////////////////////////////////////////////////////////////
138
+
139
+ /// Structure to compute the matrix product
140
+ template <
141
+ /// Threadblock-level matrix multiply-accumulate
142
+ typename MmaCore_>
143
+ struct SparseTestbed {
144
+ /// Threadblock-level GEMM implementation
145
+ using MmaCore = MmaCore_;
146
+ using ThreadblockShape = typename MmaCore::Shape;
147
+ using WarpShape = typename MmaCore::WarpShape;
148
+ using InstructionShape = typename MmaCore::InstructionShape;
149
+ using ElementA = typename MmaCore::ElementA;
150
+ using LayoutA = typename MmaCore::LayoutA;
151
+ using ElementB = typename MmaCore::ElementB;
152
+ using LayoutB = typename MmaCore::LayoutB;
153
+ using ElementC = typename MmaCore::ElementC;
154
+ using LayoutC = typename MmaCore::LayoutC;
155
+ using ElementE = typename MmaCore::ElementE;
156
+ using ThreadMapA = typename MmaCore::IteratorThreadMapA;
157
+ using ThreadMapB = typename MmaCore::IteratorThreadMapB;
158
+ using ThreadMapE = typename MmaCore::IteratorThreadMapE;
159
+ using AccessTypeA = cutlass::Array<ElementA, ThreadMapA::kElementsPerAccess>;
160
+ using AccessTypeB = cutlass::Array<ElementB, ThreadMapB::kElementsPerAccess>;
161
+ using AccessTypeE = cutlass::Array<ElementE, ThreadMapE::kElementsPerAccess>;
162
+ static int const Stages = MmaCore::kStages;
163
+ static cutlass::arch::CacheOperation::Kind const CacheOpA =
164
+ MmaCore::kCacheOpA;
165
+ static cutlass::arch::CacheOperation::Kind const CacheOpB =
166
+ MmaCore::kCacheOpB;
167
+ static cutlass::arch::CacheOperation::Kind const CacheOpE =
168
+ MmaCore::kCacheOpE;
169
+
170
+ static int const Sparse = MmaCore::kSparse;
171
+ static int const MetaSizeInBits = MmaCore::kMetaSizeInBits;
172
+ static int const MaxID2 = MmaCore::kMaxID2;
173
+
174
+ using LayoutE = cutlass::layout::RowMajor;
175
+ using ReorderedLayoutE = typename MmaCore::GmemLayoutE;
176
+
177
+ static int const ElementsPerElementE = MmaCore::kElementsPerElementE;
178
+
179
+ // Define iterators over tiles from the A operand
180
+ using IteratorA =
181
+ cutlass::transform::threadblock::PredicatedTileAccessIterator<
182
+ cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK / Sparse>,
183
+ ElementA, LayoutA, 1, ThreadMapA, AccessTypeA>;
184
+
185
+ // Define iterators over tiles from the B operand
186
+ using IteratorB =
187
+ cutlass::transform::threadblock::PredicatedTileAccessIterator<
188
+ cutlass::MatrixShape<ThreadblockShape::kK, ThreadblockShape::kN>,
189
+ ElementB, LayoutB, 0, ThreadMapB, AccessTypeB>;
190
+
191
+ // Define iterators over tiles from the E operand
192
+ using IteratorE =
193
+ cutlass::transform::threadblock::PredicatedTileAccessIterator<
194
+ cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kK /
195
+ Sparse /
196
+ ElementsPerElementE>,
197
+ ElementE, ReorderedLayoutE, 1, ThreadMapE, AccessTypeE>;
198
+
199
+ // Define the threadblock-scoped pipelined matrix multiply
200
+ using Mma = cutlass::gemm::threadblock::SparseMmaMultistage<
201
+ typename MmaCore::Shape, IteratorA, typename MmaCore::SmemIteratorA,
202
+ CacheOpA, IteratorB, typename MmaCore::SmemIteratorB, CacheOpB, ElementC,
203
+ LayoutC, IteratorE, typename MmaCore::SmemIteratorE, CacheOpE,
204
+ typename MmaCore::MmaPolicy, Stages>;
205
+
206
+ //
207
+ // Data members
208
+ //
209
+
210
+ cutlass::HostTensor<ElementA, LayoutA> matrix_A;
211
+ cutlass::HostTensor<ElementA, LayoutA> matrix_A_uncompressed;
212
+ cutlass::HostTensor<ElementB, LayoutB> matrix_B;
213
+ cutlass::HostTensor<ElementC, LayoutC> matrix_C_computed;
214
+ cutlass::HostTensor<ElementC, LayoutC> matrix_C_reference;
215
+ cutlass::HostTensor<ElementE, LayoutE> matrix_E;
216
+ cutlass::HostTensor<ElementE, ReorderedLayoutE> matrix_E_reordered;
217
+
218
+ cutlass::gemm::GemmCoord problem_size;
219
+ float alpha, beta;
220
+
221
+ //
222
+ // Methods
223
+ //
224
+
225
+ /// Allocates workspace in device memory
226
+ SparseTestbed(int m, int n, int k, float alpha_ = float(1), float beta_ = float(0))
227
+ : problem_size(m, n, k), alpha(alpha_), beta(beta_) {
228
+ matrix_A.reset(cutlass::make_Coord(m, k / Sparse));
229
+ matrix_A_uncompressed.reset(cutlass::make_Coord(m, k));
230
+ matrix_B.reset(cutlass::make_Coord(k, n));
231
+ matrix_C_computed.reset(cutlass::make_Coord(m, n));
232
+ matrix_C_reference.reset(cutlass::make_Coord(m, n), false);
233
+ matrix_E.reset(cutlass::make_Coord(m, k / Sparse / ElementsPerElementE));
234
+ matrix_E_reordered.reset(
235
+ cutlass::make_Coord(m, k / Sparse / ElementsPerElementE));
236
+ }
237
+
238
+ /// Returns true if the CUDA device is sufficient to execute the kernel.
239
+ bool sufficient() const {
240
+ //
241
+ // Determine SMEM requirements and waive if not satisfied
242
+ //
243
+
244
+ cudaDeviceProp properties;
245
+ int device_idx;
246
+ cudaError_t result = cudaGetDevice(&device_idx);
247
+
248
+ if (result != cudaSuccess) {
249
+ throw std::runtime_error("cudaGetDevice() API call failed.");
250
+ }
251
+
252
+ result = cudaGetDeviceProperties(&properties, device_idx);
253
+
254
+ if (result != cudaSuccess) {
255
+ throw std::runtime_error("cudaGetDeviceProperties() failed");
256
+ }
257
+
258
+ return true;
259
+ }
260
+
261
+ /// Runs the test
262
+ bool run(
263
+ dim3 grid, dim3 block,
264
+ cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform,
265
+ cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform,
266
+ cutlass::Distribution::Kind init_E = cutlass::Distribution::Uniform) {
267
+
268
+ // Waive the test
269
+ if (!sufficient()) {
270
+ return true;
271
+ }
272
+
273
+ //
274
+ // initialize device memory
275
+ //
276
+
277
+ if (init_A == cutlass::Distribution::Uniform) {
278
+
279
+ int scope_max = 8;
280
+ int scope_min = -8;
281
+
282
+ if (cutlass::sizeof_bits<ElementA>::value == 4) {
283
+ scope_max = 2;
284
+ scope_min = -2;
285
+ } else if (cutlass::sizeof_bits<ElementA>::value == 1) {
286
+ scope_max = 2;
287
+ scope_min = 0;
288
+ }
289
+
290
+ uint64_t seed = 7;
291
+ cutlass::reference::host::TensorFillRandomUniform(
292
+ matrix_A.host_view(), seed, scope_max, scope_min, 0);
293
+ } else if (init_A == cutlass::Distribution::Sequential) {
294
+ cutlass::reference::host::BlockFillSequential(matrix_A.host_data(),
295
+ matrix_A.capacity());
296
+ } else if (init_A == cutlass::Distribution::Identity) {
297
+ cutlass::reference::host::TensorFillIdentity(matrix_A.host_view());
298
+ } else {
299
+ return false;
300
+ }
301
+
302
+ if (init_B == cutlass::Distribution::Uniform) {
303
+
304
+ int scope_max = 8;
305
+ int scope_min = -8;
306
+
307
+ if (cutlass::sizeof_bits<ElementB>::value == 4) {
308
+ scope_max = 2;
309
+ scope_min = -2;
310
+ } else if (cutlass::sizeof_bits<ElementB>::value == 1) {
311
+ scope_max = 2;
312
+ scope_min = 0;
313
+ }
314
+
315
+ uint64_t seed = 7;
316
+ cutlass::reference::host::TensorFillRandomUniform(
317
+ matrix_B.host_view(), seed + 16, scope_max, scope_min, 0);
318
+ } else if (init_B == cutlass::Distribution::Sequential) {
319
+ cutlass::reference::host::BlockFillSequential(matrix_B.host_data(),
320
+ matrix_B.capacity());
321
+ } else if (init_B == cutlass::Distribution::Identity) {
322
+ cutlass::reference::host::TensorFillIdentity(matrix_B.host_view());
323
+ } else {
324
+ return false;
325
+ }
326
+
327
+ cutlass::reference::host::TensorFill(matrix_C_computed.host_view());
328
+
329
+ cutlass::reference::host::TensorFill(matrix_C_reference.host_view());
330
+
331
+ if (init_E == cutlass::Distribution::Uniform) {
332
+ uint64_t seed = 7;
333
+ cutlass::reference::host::TensorFillRandomSparseMeta(
334
+ matrix_E.host_view(), seed, MetaSizeInBits);
335
+ } else if (init_E == cutlass::Distribution::Identity) {
336
+ uint32_t content = (MaxID2 == 1) ? 0x44444444 : 0x4444;
337
+ cutlass::reference::host::TensorFill(matrix_E.host_view(),
338
+ (ElementE)(content));
339
+ } else {
340
+ return false;
341
+ }
342
+
343
+ cutlass::reorder_meta(matrix_E_reordered.host_ref(), matrix_E.host_ref(),
344
+ {problem_size.m(), problem_size.n(),
345
+ problem_size.k() / Sparse / ElementsPerElementE});
346
+
347
+ matrix_A.sync_device();
348
+ matrix_B.sync_device();
349
+ matrix_C_computed.sync_device();
350
+ matrix_E_reordered.sync_device();
351
+
352
+ typename IteratorA::Params params_A(matrix_A.layout());
353
+ typename IteratorB::Params params_B(matrix_B.layout());
354
+ typename IteratorE::Params params_E(matrix_E_reordered.layout());
355
+
356
+ cudaError_t result;
357
+
358
+ int smem_size = int(sizeof(typename Mma::SharedStorage));
359
+ if (smem_size >= (48 << 10)) {
360
+ result = cudaFuncSetAttribute(
361
+ test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>,
362
+ cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
363
+
364
+ if (result != cudaSuccess) {
365
+ return true;
366
+ }
367
+
368
+ result = cudaFuncSetAttribute(
369
+ test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>,
370
+ cudaFuncAttributePreferredSharedMemoryCarveout, 100);
371
+
372
+ if (result != cudaSuccess) {
373
+ return true;
374
+ }
375
+ }
376
+
377
+ test::gemm::threadblock::kernel_multistage_mma_sparse<Mma>
378
+ <<<grid, block, smem_size, 0>>>(
379
+ problem_size, params_A, matrix_A.device_ref(), params_B,
380
+ matrix_B.device_ref(), matrix_C_computed.device_data(),
381
+ matrix_C_computed.layout().stride(0), params_E,
382
+ matrix_E_reordered.device_ref());
383
+
384
+ //
385
+ // Check error code
386
+ //
387
+
388
+ result = cudaDeviceSynchronize();
389
+ EXPECT_EQ(result, cudaSuccess)
390
+ << " kernel error: " << cudaGetErrorString(result);
391
+
392
+ matrix_C_computed.sync_host();
393
+
394
+ cutlass::uncompress(matrix_A_uncompressed.host_ref(), matrix_A.host_ref(),
395
+ matrix_E.host_ref(), problem_size.m(),
396
+ problem_size.k());
397
+
398
+ cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB,
399
+ ElementC, LayoutC, ElementC, ElementC>
400
+ reference_gemm;
401
+
402
+ reference_gemm(problem_size, ElementC(alpha),
403
+ matrix_A_uncompressed.host_view(), matrix_B.host_view(),
404
+ ElementC(beta), matrix_C_reference.host_view());
405
+
406
+ bool passed = cutlass::reference::host::TensorEquals(
407
+ matrix_C_computed.host_view(), matrix_C_reference.host_view());
408
+
409
+ EXPECT_TRUE(passed);
410
+
411
+ if (!passed && CUTLASS_TEST_UNIT_ENABLE_WARNINGS) {
412
+
413
+ std::cout
414
+ << __FILE__ << ":" << __LINE__ << " "
415
+ << "A:\n" << matrix_A.host_view() << "\n"
416
+ << "B:\n" << matrix_B.host_view() << "\n"
417
+ << "E:\n" << matrix_E.host_view() << "\n"
418
+ << "Reference:\n"
419
+ << matrix_C_reference.host_view() << "\n"
420
+ << "Computed:\n"
421
+ << matrix_C_computed.host_view() << "\n";
422
+ }
423
+
424
+ EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_reference.host_view()), 0);
425
+ EXPECT_GT(cutlass::reference::host::TensorNorm(matrix_C_computed.host_view()), 0);
426
+
427
+ return passed;
428
+ }
429
+ };
430
+
431
+ ////////////////////////////////////////////////////////////////////////////////
432
+
433
+ } // namespace threadblock
434
+ } // namespace gemm
435
+ } // namespace test
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/test/unit/gemm/threadblock/mma_pipelined_sm75.cu ADDED
@@ -0,0 +1,2128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /*! \file
32
+ \brief Unit tests for threadblock-level GEMM
33
+ */
34
+
35
+ #include "mma_pipelined_testbed.h"
36
+
37
+ #if defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED)
38
+
39
+ ////////////////////////////////////////////////////////////////////////////////
40
+
41
+ TEST(SM75_gemm_threadblock_congruous, tensor_op_64x64x32_64x64x32_16x8x8) {
42
+ using ElementA = cutlass::half_t;
43
+ using LayoutA = cutlass::layout::ColumnMajor;
44
+ using ElementB = cutlass::half_t;
45
+ using LayoutB = cutlass::layout::RowMajor;
46
+ using ElementC = float;
47
+ using LayoutC = cutlass::layout::ColumnMajor;
48
+
49
+ cutlass::gemm::GemmCoord problem_size(64, 64, 128);
50
+
51
+ using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
52
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
53
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
54
+
55
+ float alpha = 1.f;
56
+ float beta = 0.0f;
57
+
58
+ // Define the MmaCore components
59
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
60
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
61
+ ElementB, LayoutB, ElementC, LayoutC,
62
+ cutlass::arch::OpClassTensorOp>;
63
+
64
+ dim3 grid(1, 1);
65
+ dim3 block(32, 1, 1);
66
+
67
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
68
+ problem_size.k(), alpha, beta)
69
+ .run(grid, block);
70
+ }
71
+
72
+ ////////////////////////////////////////////////////////////////////////////////
73
+
74
+ TEST(SM75_gemm_threadblock_congruous, tensor_op_128x64x32_64x32x32_16x8x8) {
75
+ using ElementA = cutlass::half_t;
76
+ using LayoutA = cutlass::layout::ColumnMajor;
77
+ using ElementB = cutlass::half_t;
78
+ using LayoutB = cutlass::layout::RowMajor;
79
+ using ElementC = float;
80
+ using LayoutC = cutlass::layout::ColumnMajor;
81
+
82
+ cutlass::gemm::GemmCoord problem_size(128, 64, 128);
83
+
84
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
85
+ using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
86
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
87
+
88
+ float alpha = 1.f;
89
+ float beta = 0.0f;
90
+
91
+ // Define the MmaCore components
92
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
93
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
94
+ ElementB, LayoutB, ElementC, LayoutC,
95
+ cutlass::arch::OpClassTensorOp>;
96
+
97
+ dim3 grid(1, 1);
98
+ dim3 block(32, 4, 1);
99
+
100
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
101
+ problem_size.k(), alpha, beta)
102
+ .run(grid, block);
103
+ }
104
+
105
+ ////////////////////////////////////////////////////////////////////////////////
106
+
107
+ TEST(SM75_gemm_threadblock_congruous, tensor_op_64x128x32_32x64x32_16x8x8) {
108
+ using ElementA = cutlass::half_t;
109
+ using LayoutA = cutlass::layout::ColumnMajor;
110
+ using ElementB = cutlass::half_t;
111
+ using LayoutB = cutlass::layout::RowMajor;
112
+ using ElementC = float;
113
+ using LayoutC = cutlass::layout::ColumnMajor;
114
+
115
+ cutlass::gemm::GemmCoord problem_size(64, 128, 128);
116
+
117
+ using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
118
+ using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
119
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
120
+
121
+ float alpha = 1.f;
122
+ float beta = 0.0f;
123
+
124
+ // Define the MmaCore components
125
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
126
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
127
+ ElementB, LayoutB, ElementC, LayoutC,
128
+ cutlass::arch::OpClassTensorOp>;
129
+
130
+ dim3 grid(1, 1);
131
+ dim3 block(32, 4, 1);
132
+
133
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
134
+ problem_size.k(), alpha, beta)
135
+ .run(grid, block);
136
+ }
137
+
138
+ ////////////////////////////////////////////////////////////////////////////////
139
+
140
+ TEST(SM75_gemm_threadblock_congruous, tensor_op_128x128x32_64x64x32_16x8x8) {
141
+ using ElementA = cutlass::half_t;
142
+ using LayoutA = cutlass::layout::ColumnMajor;
143
+ using ElementB = cutlass::half_t;
144
+ using LayoutB = cutlass::layout::RowMajor;
145
+ using ElementC = float;
146
+ using LayoutC = cutlass::layout::ColumnMajor;
147
+
148
+ cutlass::gemm::GemmCoord problem_size(128, 128, 128);
149
+
150
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
151
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
152
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
153
+
154
+ float alpha = 1.f;
155
+ float beta = 0.0f;
156
+
157
+ // Define the MmaCore components
158
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
159
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
160
+ ElementB, LayoutB, ElementC, LayoutC,
161
+ cutlass::arch::OpClassTensorOp>;
162
+
163
+ dim3 grid(1, 1);
164
+ dim3 block(32, 4, 1);
165
+
166
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
167
+ problem_size.k(), alpha, beta)
168
+ .run(grid, block);
169
+ }
170
+
171
+ ////////////////////////////////////////////////////////////////////////////////
172
+
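+ // The multicta_* tests launch a 2x2 grid of threadblocks so that neighboring CTAs
+ // tile a problem larger than a single ThreadblockShape in M and N.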
173
+ TEST(SM75_gemm_threadblock_congruous,
174
+ multicta_256x256x96_128x128x32_64x64x32_16x8x8) {
175
+ using ElementA = cutlass::half_t;
176
+ using LayoutA = cutlass::layout::ColumnMajor;
177
+ using ElementB = cutlass::half_t;
178
+ using LayoutB = cutlass::layout::RowMajor;
179
+ using ElementC = cutlass::half_t;
180
+ using LayoutC = cutlass::layout::ColumnMajor;
181
+
182
+ cutlass::gemm::GemmCoord problem_size(256, 256, 96);
183
+
184
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
185
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
186
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
187
+
188
+ float alpha = 1.f;
189
+ float beta = 0.0f;
190
+
191
+ // Define the MmaCore components
192
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
193
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
194
+ ElementB, LayoutB, ElementC, LayoutC,
195
+ cutlass::arch::OpClassTensorOp>;
196
+
197
+ dim3 grid(2, 2);
198
+ dim3 block(32, 4, 1);
199
+
200
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
201
+ problem_size.k(), alpha, beta)
202
+ .run(grid, block);
203
+ }
204
+
205
+ ////////////////////////////////////////////////////////////////////////////////
206
+
207
+ TEST(SM75_gemm_threadblock_congruous,
208
+ multicta_512x256x384_256x128x32_64x64x32_16x8x8) {
209
+ using ElementA = cutlass::half_t;
210
+ using LayoutA = cutlass::layout::ColumnMajor;
211
+ using ElementB = cutlass::half_t;
212
+ using LayoutB = cutlass::layout::RowMajor;
213
+ using ElementC = cutlass::half_t;
214
+ using LayoutC = cutlass::layout::ColumnMajor;
215
+
216
+ cutlass::gemm::GemmCoord problem_size(512, 256, 384);
217
+
218
+ using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
219
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
220
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
221
+
222
+ float alpha = 1.f;
223
+ float beta = 0.0f;
224
+
225
+ // Define the MmaCore components
226
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
227
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
228
+ ElementB, LayoutB, ElementC, LayoutC,
229
+ cutlass::arch::OpClassTensorOp>;
230
+
231
+ dim3 grid(2, 2);
232
+ dim3 block(32, 8, 1);
233
+
234
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
235
+ problem_size.k(), alpha, beta)
236
+ .run(grid, block);
237
+ }
238
+
239
+ ////////////////////////////////////////////////////////////////////////////////
240
+
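+ // The crosswise tests swap the operand layouts used by the congruous tests above:
+ // A is row-major and B is column-major, so both operands are contiguous along K.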
241
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x32_64x64x32_16x8x8) {
242
+ using ElementA = cutlass::half_t;
243
+ using LayoutA = cutlass::layout::RowMajor;
244
+ using ElementB = cutlass::half_t;
245
+ using LayoutB = cutlass::layout::ColumnMajor;
246
+ using ElementC = float;
247
+ using LayoutC = cutlass::layout::ColumnMajor;
248
+
249
+ cutlass::gemm::GemmCoord problem_size(64, 64, 128);
250
+
251
+ using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
252
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
253
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
254
+
255
+ float alpha = 1.f;
256
+ float beta = 0.0f;
257
+
258
+ // Define the MmaCore components
259
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
260
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
261
+ ElementB, LayoutB, ElementC, LayoutC,
262
+ cutlass::arch::OpClassTensorOp>;
263
+
264
+ dim3 grid(1, 1);
265
+ dim3 block(32, 1, 1);
266
+
267
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
268
+ problem_size.k(), alpha, beta)
269
+ .run(grid, block);
270
+ }
271
+
272
+ ////////////////////////////////////////////////////////////////////////////////
273
+
274
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x32x32_16x16x32_16x8x8) {
275
+ using ElementA = cutlass::half_t;
276
+ using LayoutA = cutlass::layout::RowMajor;
277
+ using ElementB = cutlass::half_t;
278
+ using LayoutB = cutlass::layout::ColumnMajor;
279
+ using ElementC = float;
280
+ using LayoutC = cutlass::layout::ColumnMajor;
281
+
282
+ cutlass::gemm::GemmCoord problem_size(32, 32, 128);
283
+
284
+ using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 32>;
285
+ using WarpShape = cutlass::gemm::GemmShape<16, 16, 32>;
286
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
287
+
288
+ float alpha = 1.f;
289
+ float beta = 0.0f;
290
+
291
+ // Define the MmaCore components
292
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
293
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
294
+ ElementB, LayoutB, ElementC, LayoutC,
295
+ cutlass::arch::OpClassTensorOp>;
296
+
297
+ dim3 grid(1, 1);
298
+ dim3 block(32, 4, 1);
299
+
300
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
301
+ problem_size.k(), alpha, beta)
302
+ .run(grid, block);
303
+ }
304
+
305
+ ////////////////////////////////////////////////////////////////////////////////
306
+
307
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x64x32_16x32x32_16x8x8) {
308
+ using ElementA = cutlass::half_t;
309
+ using LayoutA = cutlass::layout::RowMajor;
310
+ using ElementB = cutlass::half_t;
311
+ using LayoutB = cutlass::layout::ColumnMajor;
312
+ using ElementC = float;
313
+ using LayoutC = cutlass::layout::ColumnMajor;
314
+
315
+ cutlass::gemm::GemmCoord problem_size(32, 64, 128);
316
+
317
+ using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 32>;
318
+ using WarpShape = cutlass::gemm::GemmShape<16, 32, 32>;
319
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
320
+
321
+ float alpha = 1.f;
322
+ float beta = 0.0f;
323
+
324
+ // Define the MmaCore components
325
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
326
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
327
+ ElementB, LayoutB, ElementC, LayoutC,
328
+ cutlass::arch::OpClassTensorOp>;
329
+
330
+ dim3 grid(1, 1);
331
+ dim3 block(32, 4, 1);
332
+
333
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
334
+ problem_size.k(), alpha, beta)
335
+ .run(grid, block);
336
+ }
337
+
338
+ ////////////////////////////////////////////////////////////////////////////////
339
+
340
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x32x32_32x16x32_16x8x8) {
341
+ using ElementA = cutlass::half_t;
342
+ using LayoutA = cutlass::layout::RowMajor;
343
+ using ElementB = cutlass::half_t;
344
+ using LayoutB = cutlass::layout::ColumnMajor;
345
+ using ElementC = float;
346
+ using LayoutC = cutlass::layout::ColumnMajor;
347
+
348
+ cutlass::gemm::GemmCoord problem_size(64, 32, 128);
349
+
350
+ using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 32>;
351
+ using WarpShape = cutlass::gemm::GemmShape<32, 16, 32>;
352
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
353
+
354
+ float alpha = 1.f;
355
+ float beta = 0.0f;
356
+
357
+ // Define the MmaCore components
358
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
359
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
360
+ ElementB, LayoutB, ElementC, LayoutC,
361
+ cutlass::arch::OpClassTensorOp>;
362
+
363
+ dim3 grid(1, 1);
364
+ dim3 block(32, 4, 1);
365
+
366
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
367
+ problem_size.k(), alpha, beta)
368
+ .run(grid, block);
369
+ }
370
+
371
+ ////////////////////////////////////////////////////////////////////////////////
372
+
373
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x32_32x32x32_16x8x8) {
374
+ using ElementA = cutlass::half_t;
375
+ using LayoutA = cutlass::layout::RowMajor;
376
+ using ElementB = cutlass::half_t;
377
+ using LayoutB = cutlass::layout::ColumnMajor;
378
+ using ElementC = float;
379
+ using LayoutC = cutlass::layout::ColumnMajor;
380
+
381
+ cutlass::gemm::GemmCoord problem_size(64, 64, 128);
382
+
383
+ using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 32>;
384
+ using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
385
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
386
+
387
+ float alpha = 1.f;
388
+ float beta = 0.0f;
389
+
390
+ // Define the MmaCore components
391
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
392
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
393
+ ElementB, LayoutB, ElementC, LayoutC,
394
+ cutlass::arch::OpClassTensorOp>;
395
+
396
+ dim3 grid(1, 1);
397
+ dim3 block(32, 4, 1);
398
+
399
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
400
+ problem_size.k(), alpha, beta)
401
+ .run(grid, block);
402
+ }
403
+
404
+ ////////////////////////////////////////////////////////////////////////////////
405
+
406
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x64x32_64x32x32_16x8x8) {
407
+ using ElementA = cutlass::half_t;
408
+ using LayoutA = cutlass::layout::RowMajor;
409
+ using ElementB = cutlass::half_t;
410
+ using LayoutB = cutlass::layout::ColumnMajor;
411
+ using ElementC = float;
412
+ using LayoutC = cutlass::layout::ColumnMajor;
413
+
414
+ cutlass::gemm::GemmCoord problem_size(128, 64, 128);
415
+
416
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 32>;
417
+ using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
418
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
419
+
420
+ float alpha = 1.f;
421
+ float beta = 0.0f;
422
+
423
+ // Define the MmaCore components
424
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
425
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
426
+ ElementB, LayoutB, ElementC, LayoutC,
427
+ cutlass::arch::OpClassTensorOp>;
428
+
429
+ dim3 grid(1, 1);
430
+ dim3 block(32, 4, 1);
431
+
432
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
433
+ problem_size.k(), alpha, beta)
434
+ .run(grid, block);
435
+ }
436
+
437
+ ////////////////////////////////////////////////////////////////////////////////
438
+
439
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x128x32_32x64x32_16x8x8) {
440
+ using ElementA = cutlass::half_t;
441
+ using LayoutA = cutlass::layout::RowMajor;
442
+ using ElementB = cutlass::half_t;
443
+ using LayoutB = cutlass::layout::ColumnMajor;
444
+ using ElementC = float;
445
+ using LayoutC = cutlass::layout::ColumnMajor;
446
+
447
+ cutlass::gemm::GemmCoord problem_size(64, 128, 128);
448
+
449
+ using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 32>;
450
+ using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
451
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
452
+
453
+ float alpha = 1.f;
454
+ float beta = 0.0f;
455
+
456
+ // Define the MmaCore components
457
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
458
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
459
+ ElementB, LayoutB, ElementC, LayoutC,
460
+ cutlass::arch::OpClassTensorOp>;
461
+
462
+ dim3 grid(1, 1);
463
+ dim3 block(32, 4, 1);
464
+
465
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
466
+ problem_size.k(), alpha, beta)
467
+ .run(grid, block);
468
+ }
469
+
470
+ ////////////////////////////////////////////////////////////////////////////////
471
+
472
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x128x32_64x64x32_16x8x8) {
473
+ using ElementA = cutlass::half_t;
474
+ using LayoutA = cutlass::layout::RowMajor;
475
+ using ElementB = cutlass::half_t;
476
+ using LayoutB = cutlass::layout::ColumnMajor;
477
+ using ElementC = float;
478
+ using LayoutC = cutlass::layout::ColumnMajor;
479
+
480
+ cutlass::gemm::GemmCoord problem_size(128, 128, 96);
481
+
482
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
483
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
484
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
485
+
486
+ float alpha = 1.f;
487
+ float beta = 0.0f;
488
+
489
+ // Define the MmaCore components
490
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
491
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
492
+ ElementB, LayoutB, ElementC, LayoutC,
493
+ cutlass::arch::OpClassTensorOp>;
494
+
495
+ dim3 grid(1, 1);
496
+ dim3 block(32, 4, 1);
497
+
498
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
499
+ problem_size.k(), alpha, beta)
500
+ .run(grid, block);
501
+ }
502
+
503
+ ////////////////////////////////////////////////////////////////////////////////
504
+
505
+ TEST(SM75_gemm_threadblock_crosswise,
506
+ multicta_256x256x96_128x128x32_64x64x32_16x8x8) {
507
+ using ElementA = cutlass::half_t;
508
+ using LayoutA = cutlass::layout::RowMajor;
509
+ using ElementB = cutlass::half_t;
510
+ using LayoutB = cutlass::layout::ColumnMajor;
511
+ using ElementC = float;
512
+ using LayoutC = cutlass::layout::ColumnMajor;
513
+
514
+ cutlass::gemm::GemmCoord problem_size(256, 256, 96);
515
+
516
+ using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
517
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
518
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
519
+
520
+ float alpha = 1.f;
521
+ float beta = 0.0f;
522
+
523
+ // Define the MmaCore components
524
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
525
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
526
+ ElementB, LayoutB, ElementC, LayoutC,
527
+ cutlass::arch::OpClassTensorOp>;
528
+
529
+ dim3 grid(2, 2);
530
+ dim3 block(32, 4, 1);
531
+
532
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
533
+ problem_size.k(), alpha, beta)
534
+ .run(grid, block);
535
+ }
536
+
537
+ ////////////////////////////////////////////////////////////////////////////////
538
+
539
+ TEST(SM75_gemm_threadblock_crosswise,
540
+ multicta_512x256x384_256x128x32_64x64x32_16x8x8) {
541
+ using ElementA = cutlass::half_t;
542
+ using LayoutA = cutlass::layout::RowMajor;
543
+ using ElementB = cutlass::half_t;
544
+ using LayoutB = cutlass::layout::ColumnMajor;
545
+ using ElementC = float;
546
+ using LayoutC = cutlass::layout::ColumnMajor;
547
+
548
+ cutlass::gemm::GemmCoord problem_size(512, 256, 384);
549
+
550
+ using ThreadblockShape = cutlass::gemm::GemmShape<256, 128, 32>;
551
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>;
552
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
553
+
554
+ float alpha = 1.f;
555
+ float beta = 0.0f;
556
+
557
+ // Define the MmaCore components
558
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
559
+ ThreadblockShape, WarpShape, InstructionShape, ElementA, LayoutA,
560
+ ElementB, LayoutB, ElementC, LayoutC,
561
+ cutlass::arch::OpClassTensorOp>;
562
+
563
+ dim3 grid(2, 2);
564
+ dim3 block(32, 8, 1);
565
+
566
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
567
+ problem_size.k(), alpha, beta)
568
+ .run(grid, block);
569
+ }
570
+
571
+ ////////////////////////////////////////////////////////////////////////////////
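+
+ // The interleaved tests store A in ColumnMajorInterleaved<32> and B in
+ // RowMajorInterleaved<32>, using uint8_t operands with int32_t accumulators
+ // and the 8x8x16 integer tensor op instruction.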
572
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_32x32x64_16x16x64_8x8x16) {
573
+ using ElementA = uint8_t;
574
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
575
+ using ElementB = uint8_t;
576
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
577
+ using ElementC = int32_t;
578
+ using LayoutC = cutlass::layout::ColumnMajor;
579
+
580
+ cutlass::gemm::GemmCoord problem_size(32, 32, 256);
581
+
582
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 64>;
583
+ using WarpShape = cutlass::gemm::GemmShape<16, 16, 64>;
584
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
585
+
586
+ float alpha = 1.f;
587
+ float beta = 0.f;
588
+
589
+ // Define the MmaCore components
590
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
591
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
592
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
593
+
594
+ dim3 grid(1, 1);
595
+ dim3 block(32, 4, 1);
596
+
597
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
598
+ problem_size.k(), alpha, beta)
599
+ .run(grid, block);
600
+ }
601
+
602
+ ////////////////////////////////////////////////////////////////////////////////
603
+
604
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x32x64_32x16x64_8x8x16) {
605
+ using ElementA = uint8_t;
606
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
607
+ using ElementB = uint8_t;
608
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
609
+ using ElementC = int32_t;
610
+ using LayoutC = cutlass::layout::ColumnMajor;
611
+
612
+ cutlass::gemm::GemmCoord problem_size(64, 32, 256);
613
+
614
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 64>;
615
+ using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
616
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
617
+
618
+ float alpha = 1.f;
619
+ float beta = 0.f;
620
+
621
+ // Define the MmaCore components
622
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
623
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
624
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
625
+
626
+ dim3 grid(1, 1);
627
+ dim3 block(32, 4, 1);
628
+
629
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
630
+ problem_size.k(), alpha, beta)
631
+ .run(grid, block);
632
+ }
633
+
634
+ ////////////////////////////////////////////////////////////////////////////////
635
+
636
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_32x64x64_16x32x64_8x8x16) {
637
+ using ElementA = uint8_t;
638
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
639
+ using ElementB = uint8_t;
640
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
641
+ using ElementC = int32_t;
642
+ using LayoutC = cutlass::layout::ColumnMajor;
643
+
644
+ cutlass::gemm::GemmCoord problem_size(32, 64, 256);
645
+
646
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
647
+ using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>;
648
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
649
+
650
+ float alpha = 1.f;
651
+ float beta = 0.f;
652
+
653
+ // Define the MmaCore components
654
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
655
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
656
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
657
+
658
+ dim3 grid(1, 1);
659
+ dim3 block(32, 4, 1);
660
+
661
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
662
+ problem_size.k(), alpha, beta)
663
+ .run(grid, block);
664
+ }
665
+
666
+ ////////////////////////////////////////////////////////////////////////////////
667
+
668
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x64x64_32x32x64_8x8x16) {
669
+ using ElementA = uint8_t;
670
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
671
+ using ElementB = uint8_t;
672
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
673
+ using ElementC = int32_t;
674
+ using LayoutC = cutlass::layout::ColumnMajor;
675
+
676
+ cutlass::gemm::GemmCoord problem_size(64, 64, 256);
677
+
678
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>;
679
+ using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
680
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
681
+
682
+ float alpha = 1.f;
683
+ float beta = 0.f;
684
+
685
+ // Define the MmaCore components
686
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
687
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
688
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
689
+
690
+ dim3 grid(1, 1);
691
+ dim3 block(32, 4, 1);
692
+
693
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
694
+ problem_size.k(), alpha, beta)
695
+ .run(grid, block);
696
+ }
697
+
698
+ ////////////////////////////////////////////////////////////////////////////////
699
+
700
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_128x64x64_64x32x64_8x8x16) {
701
+ using ElementA = uint8_t;
702
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
703
+ using ElementB = uint8_t;
704
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
705
+ using ElementC = int32_t;
706
+ using LayoutC = cutlass::layout::ColumnMajor;
707
+
708
+ cutlass::gemm::GemmCoord problem_size(128, 64, 256);
709
+
710
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
711
+ using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
712
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
713
+
714
+ float alpha = 1.f;
715
+ float beta = 0.f;
716
+
717
+ // Define the MmaCore components
718
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
719
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
720
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
721
+
722
+ dim3 grid(1, 1);
723
+ dim3 block(32, 4, 1);
724
+
725
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
726
+ problem_size.k(), alpha, beta)
727
+ .run(grid, block);
728
+ }
729
+
730
+ ////////////////////////////////////////////////////////////////////////////////
731
+
732
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x128x64_32x64x64_8x8x16) {
733
+ using ElementA = uint8_t;
734
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
735
+ using ElementB = uint8_t;
736
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
737
+ using ElementC = int32_t;
738
+ using LayoutC = cutlass::layout::ColumnMajor;
739
+
740
+ cutlass::gemm::GemmCoord problem_size(64, 128, 256);
741
+
742
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
743
+ using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
744
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
745
+
746
+ float alpha = 1.f;
747
+ float beta = 0.f;
748
+
749
+ // Define the MmaCore components
750
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
751
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
752
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
753
+
754
+ dim3 grid(1, 1);
755
+ dim3 block(32, 4, 1);
756
+
757
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
758
+ problem_size.k(), alpha, beta)
759
+ .run(grid, block);
760
+ }
761
+
762
+ ////////////////////////////////////////////////////////////////////////////////
763
+
764
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_128x128x64_64x64x64_8x8x16) {
765
+ using ElementA = uint8_t;
766
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
767
+ using ElementB = uint8_t;
768
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
769
+ using ElementC = int32_t;
770
+ using LayoutC = cutlass::layout::ColumnMajor;
771
+
772
+ cutlass::gemm::GemmCoord problem_size(128, 128, 256);
773
+
774
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
775
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
776
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
777
+
778
+ float alpha = 1.f;
779
+ float beta = 0.f;
780
+
781
+ // Define the MmaCore components
782
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
783
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
784
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
785
+
786
+ dim3 grid(1, 1);
787
+ dim3 block(32, 4, 1);
788
+
789
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
790
+ problem_size.k(), alpha, beta)
791
+ .run(grid, block);
792
+ }
793
+
794
+ ////////////////////////////////////////////////////////////////////////////////
795
+
796
+ TEST(SM75_gemm_threadblock_interleaved,
797
+ multicta_256x256x192_128x128x64_64x64x64_8x8x16) {
798
+ using ElementA = uint8_t;
799
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
800
+ using ElementB = uint8_t;
801
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
802
+ using ElementC = int32_t;
803
+ using LayoutC = cutlass::layout::ColumnMajor;
804
+
805
+ cutlass::gemm::GemmCoord problem_size(256, 256, 192);
806
+
807
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
808
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
809
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
810
+
811
+ float alpha = 1.f;
812
+ float beta = 0.f;
813
+
814
+ // Define the MmaCore components
815
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
816
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
817
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
818
+
819
+ dim3 grid(2, 2);
820
+ dim3 block(32, 4, 1);
821
+
822
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
823
+ problem_size.k(), alpha, beta)
824
+ .run(grid, block);
825
+ }
826
+
827
+ ////////////////////////////////////////////////////////////////////////////////
828
+
829
+ TEST(SM75_gemm_threadblock_interleaved,
830
+ multicta_512x256x768_256x128x64_64x64x64_8x8x16) {
831
+ using ElementA = uint8_t;
832
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<32>;
833
+ using ElementB = uint8_t;
834
+ using LayoutB = cutlass::layout::RowMajorInterleaved<32>;
835
+ using ElementC = int32_t;
836
+ using LayoutC = cutlass::layout::ColumnMajor;
837
+
838
+ cutlass::gemm::GemmCoord problem_size(512, 256, 768);
839
+
840
+ using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>;
841
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
842
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
843
+
844
+ float alpha = 1.f;
845
+ float beta = 0.f;
846
+
847
+ // Define the MmaCore components
848
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
849
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
850
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
851
+
852
+ dim3 grid(2, 2);
853
+ dim3 block(32, 8, 1);
854
+
855
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
856
+ problem_size.k(), alpha, beta)
857
+ .run(grid, block);
858
+ }
859
+
860
+ ////////////////////////////////////////////////////////////////////////////////
861
+
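+ // The following crosswise tests also use uint8_t operands with int32_t accumulators
+ // and the 8x8x16 integer tensor op instruction, but with plain row/column-major layouts.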
862
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x64_64x64x64_8x8x16) {
863
+ using ElementA = uint8_t;
864
+ using LayoutA = cutlass::layout::RowMajor;
865
+ using ElementB = uint8_t;
866
+ using LayoutB = cutlass::layout::ColumnMajor;
867
+ using ElementC = int32_t;
868
+ using LayoutC = cutlass::layout::ColumnMajor;
869
+
870
+ cutlass::gemm::GemmCoord problem_size(64, 64, 256);
871
+
872
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>;
873
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
874
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
875
+
876
+ float alpha = 1.f;
877
+ float beta = 0.f;
878
+
879
+ // Define the MmaCore components
880
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
881
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
882
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
883
+
884
+ dim3 grid(1, 1);
885
+ dim3 block(32, 1, 1);
886
+
887
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
888
+ problem_size.k(), alpha, beta)
889
+ .run(grid, block);
890
+ }
891
+
892
+ ////////////////////////////////////////////////////////////////////////////////
893
+
894
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x32x64_16x16x64_8x8x16) {
895
+ using ElementA = uint8_t;
896
+ using LayoutA = cutlass::layout::RowMajor;
897
+ using ElementB = uint8_t;
898
+ using LayoutB = cutlass::layout::ColumnMajor;
899
+ using ElementC = int32_t;
900
+ using LayoutC = cutlass::layout::ColumnMajor;
901
+
902
+ cutlass::gemm::GemmCoord problem_size(32, 32, 256);
903
+
904
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 64>;
905
+ using WarpShape = cutlass::gemm::GemmShape<16, 16, 64>;
906
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
907
+
908
+ float alpha = 1.f;
909
+ float beta = 0.f;
910
+
911
+ // Define the MmaCore components
912
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
913
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
914
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
915
+
916
+ dim3 grid(1, 1);
917
+ dim3 block(32, 4, 1);
918
+
919
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
920
+ problem_size.k(), alpha, beta)
921
+ .run(grid, block);
922
+ }
923
+
924
+ ////////////////////////////////////////////////////////////////////////////////
925
+
926
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x32x64_32x16x64_8x8x16) {
927
+ using ElementA = uint8_t;
928
+ using LayoutA = cutlass::layout::RowMajor;
929
+ using ElementB = uint8_t;
930
+ using LayoutB = cutlass::layout::ColumnMajor;
931
+ using ElementC = int32_t;
932
+ using LayoutC = cutlass::layout::ColumnMajor;
933
+
934
+ cutlass::gemm::GemmCoord problem_size(64, 32, 256);
935
+
936
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 64>;
937
+ using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
938
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
939
+
940
+ float alpha = 1.f;
941
+ float beta = 0.f;
942
+
943
+ // Define the MmaCore components
944
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
945
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
946
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
947
+
948
+ dim3 grid(1, 1);
949
+ dim3 block(32, 4, 1);
950
+
951
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
952
+ problem_size.k(), alpha, beta)
953
+ .run(grid, block);
954
+ }
955
+
956
+ ////////////////////////////////////////////////////////////////////////////////
957
+
958
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x64x64_16x32x64_8x8x16) {
959
+ using ElementA = uint8_t;
960
+ using LayoutA = cutlass::layout::RowMajor;
961
+ using ElementB = uint8_t;
962
+ using LayoutB = cutlass::layout::ColumnMajor;
963
+ using ElementC = int32_t;
964
+ using LayoutC = cutlass::layout::ColumnMajor;
965
+
966
+ cutlass::gemm::GemmCoord problem_size(32, 64, 256);
967
+
968
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
969
+ using WarpShape = cutlass::gemm::GemmShape<16, 32, 64>;
970
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
971
+
972
+ float alpha = 1.f;
973
+ float beta = 0.f;
974
+
975
+ // Define the MmaCore components
976
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
977
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
978
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
979
+
980
+ dim3 grid(1, 1);
981
+ dim3 block(32, 4, 1);
982
+
983
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
984
+ problem_size.k(), alpha, beta)
985
+ .run(grid, block);
986
+ }
987
+
988
+ ////////////////////////////////////////////////////////////////////////////////
989
+
990
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x64_32x32x64_8x8x16) {
991
+ using ElementA = uint8_t;
992
+ using LayoutA = cutlass::layout::RowMajor;
993
+ using ElementB = uint8_t;
994
+ using LayoutB = cutlass::layout::ColumnMajor;
995
+ using ElementC = int32_t;
996
+ using LayoutC = cutlass::layout::ColumnMajor;
997
+
998
+ cutlass::gemm::GemmCoord problem_size(64, 64, 256);
999
+
1000
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>;
1001
+ using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>;
1002
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
1003
+
1004
+ float alpha = 1.f;
1005
+ float beta = 0.f;
1006
+
1007
+ // Define the MmaCore components
1008
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1009
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1010
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1011
+
1012
+ dim3 grid(1, 1);
1013
+ dim3 block(32, 4, 1);
1014
+
1015
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1016
+ problem_size.k(), alpha, beta)
1017
+ .run(grid, block);
1018
+ }
1019
+
1020
+ ////////////////////////////////////////////////////////////////////////////////
1021
+
1022
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x64x64_64x32x64_8x8x16) {
1023
+ using ElementA = uint8_t;
1024
+ using LayoutA = cutlass::layout::RowMajor;
1025
+ using ElementB = uint8_t;
1026
+ using LayoutB = cutlass::layout::ColumnMajor;
1027
+ using ElementC = int32_t;
1028
+ using LayoutC = cutlass::layout::ColumnMajor;
1029
+
1030
+ cutlass::gemm::GemmCoord problem_size(128, 64, 256);
1031
+
1032
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
1033
+ using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
1034
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
1035
+
1036
+ float alpha = 1.f;
1037
+ float beta = 0.f;
1038
+
1039
+ // Define the MmaCore components
1040
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1041
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1042
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1043
+
1044
+ dim3 grid(1, 1);
1045
+ dim3 block(32, 4, 1);
1046
+
1047
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1048
+ problem_size.k(), alpha, beta)
1049
+ .run(grid, block);
1050
+ }
1051
+
1052
+ ////////////////////////////////////////////////////////////////////////////////
1053
+
1054
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x128x64_32x64x64_8x8x16) {
1055
+ using ElementA = uint8_t;
1056
+ using LayoutA = cutlass::layout::RowMajor;
1057
+ using ElementB = uint8_t;
1058
+ using LayoutB = cutlass::layout::ColumnMajor;
1059
+ using ElementC = int32_t;
1060
+ using LayoutC = cutlass::layout::ColumnMajor;
1061
+
1062
+ cutlass::gemm::GemmCoord problem_size(64, 128, 256);
1063
+
1064
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
1065
+ using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
1066
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
1067
+
1068
+ float alpha = 1.f;
1069
+ float beta = 0.f;
1070
+
1071
+ // Define the MmaCore components
1072
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1073
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1074
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1075
+
1076
+ dim3 grid(1, 1);
1077
+ dim3 block(32, 4, 1);
1078
+
1079
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1080
+ problem_size.k(), alpha, beta)
1081
+ .run(grid, block);
1082
+ }
1083
+
1084
+ ////////////////////////////////////////////////////////////////////////////////
1085
+
1086
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x128x64_64x64x64_8x8x16) {
1087
+ using ElementA = uint8_t;
1088
+ using LayoutA = cutlass::layout::RowMajor;
1089
+ using ElementB = uint8_t;
1090
+ using LayoutB = cutlass::layout::ColumnMajor;
1091
+ using ElementC = int32_t;
1092
+ using LayoutC = cutlass::layout::ColumnMajor;
1093
+
1094
+ cutlass::gemm::GemmCoord problem_size(128, 128, 256);
1095
+
1096
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
1097
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
1098
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
1099
+
1100
+ float alpha = 1.f;
1101
+ float beta = 0.f;
1102
+
1103
+ // Define the MmaCore components
1104
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1105
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1106
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1107
+
1108
+ dim3 grid(1, 1);
1109
+ dim3 block(32, 4, 1);
1110
+
1111
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1112
+ problem_size.k(), alpha, beta)
1113
+ .run(grid, block);
1114
+ }
1115
+
1116
+ ////////////////////////////////////////////////////////////////////////////////
1117
+
1118
+ TEST(SM75_gemm_threadblock_crosswise,
1119
+ multicta_256x256x192_128x128x64_64x64x64_8x8x16) {
1120
+ using ElementA = uint8_t;
1121
+ using LayoutA = cutlass::layout::RowMajor;
1122
+ using ElementB = uint8_t;
1123
+ using LayoutB = cutlass::layout::ColumnMajor;
1124
+ using ElementC = int32_t;
1125
+ using LayoutC = cutlass::layout::ColumnMajor;
1126
+
1127
+ cutlass::gemm::GemmCoord problem_size(256, 256, 192);
1128
+
1129
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
1130
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
1131
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
1132
+
1133
+ float alpha = 1.f;
1134
+ float beta = 0.f;
1135
+
1136
+ // Define the MmaCore components
1137
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1138
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1139
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1140
+
1141
+ dim3 grid(2, 2);
1142
+ dim3 block(32, 4, 1);
1143
+
1144
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1145
+ problem_size.k(), alpha, beta)
1146
+ .run(grid, block);
1147
+ }
1148
+
1149
+ ////////////////////////////////////////////////////////////////////////////////
1150
+
1151
+ TEST(SM75_gemm_threadblock_crosswise,
1152
+ multicta_512x256x768_256x128x64_64x64x64_8x8x16) {
1153
+ using ElementA = uint8_t;
1154
+ using LayoutA = cutlass::layout::RowMajor;
1155
+ using ElementB = uint8_t;
1156
+ using LayoutB = cutlass::layout::ColumnMajor;
1157
+ using ElementC = int32_t;
1158
+ using LayoutC = cutlass::layout::ColumnMajor;
1159
+
1160
+ cutlass::gemm::GemmCoord problem_size(512, 256, 768);
1161
+
1162
+ using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>;
1163
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
1164
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
1165
+
1166
+ float alpha = 1.f;
1167
+ float beta = 0.f;
1168
+
1169
+ // Define the MmaCore components
1170
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1171
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1172
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1173
+
1174
+ dim3 grid(2, 2);
1175
+ dim3 block(32, 8, 1);
1176
+
1177
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1178
+ problem_size.k(), alpha, beta)
1179
+ .run(grid, block);
1180
+ }
1181
+
1182
+ ////////////////////////////////////////////////////////////////////////////////
1183
+
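+ // From here the crosswise tests switch to 4-bit operands (cutlass::uint4b_t) and the
+ // 8x8x32 instruction, doubling the K extent of the threadblock and warp tiles.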
1184
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x128_64x64x128_8x8x32) {
1185
+ using ElementA = cutlass::uint4b_t;
1186
+ using LayoutA = cutlass::layout::RowMajor;
1187
+ using ElementB = cutlass::uint4b_t;
1188
+ using LayoutB = cutlass::layout::ColumnMajor;
1189
+ using ElementC = int32_t;
1190
+ using LayoutC = cutlass::layout::ColumnMajor;
1191
+
1192
+ cutlass::gemm::GemmCoord problem_size(64, 64, 512);
1193
+
1194
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>;
1195
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
1196
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1197
+
1198
+ float alpha = 1.f;
1199
+ float beta = 0.f;
1200
+
1201
+ // Define the MmaCore components
1202
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1203
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1204
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1205
+
1206
+ dim3 grid(1, 1);
1207
+ dim3 block(32, 1, 1);
1208
+
1209
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1210
+ problem_size.k(), alpha, beta)
1211
+ .run(grid, block);
1212
+ }
1213
+
1214
+ ////////////////////////////////////////////////////////////////////////////////
1215
+
1216
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x32x128_16x16x128_8x8x32) {
1217
+ using ElementA = cutlass::uint4b_t;
1218
+ using LayoutA = cutlass::layout::RowMajor;
1219
+ using ElementB = cutlass::uint4b_t;
1220
+ using LayoutB = cutlass::layout::ColumnMajor;
1221
+ using ElementC = int32_t;
1222
+ using LayoutC = cutlass::layout::ColumnMajor;
1223
+
1224
+ cutlass::gemm::GemmCoord problem_size(32, 32, 512);
1225
+
1226
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 128>;
1227
+ using WarpShape = cutlass::gemm::GemmShape<16, 16, 128>;
1228
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1229
+
1230
+ float alpha = 1.f;
1231
+ float beta = 0.f;
1232
+
1233
+ // Define the MmaCore components
1234
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1235
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1236
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1237
+
1238
+ dim3 grid(1, 1);
1239
+ dim3 block(32, 4, 1);
1240
+
1241
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1242
+ problem_size.k(), alpha, beta)
1243
+ .run(grid, block);
1244
+ }
1245
+
1246
+ ////////////////////////////////////////////////////////////////////////////////
1247
+
1248
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x32x128_32x16x128_8x8x32) {
1249
+ using ElementA = cutlass::uint4b_t;
1250
+ using LayoutA = cutlass::layout::RowMajor;
1251
+ using ElementB = cutlass::uint4b_t;
1252
+ using LayoutB = cutlass::layout::ColumnMajor;
1253
+ using ElementC = int32_t;
1254
+ using LayoutC = cutlass::layout::ColumnMajor;
1255
+
1256
+ cutlass::gemm::GemmCoord problem_size(64, 32, 512);
1257
+
1258
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 128>;
1259
+ using WarpShape = cutlass::gemm::GemmShape<32, 16, 128>;
1260
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1261
+
1262
+ float alpha = 1.f;
1263
+ float beta = 0.f;
1264
+
1265
+ // Define the MmaCore components
1266
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1267
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1268
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1269
+
1270
+ dim3 grid(1, 1);
1271
+ dim3 block(32, 4, 1);
1272
+
1273
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1274
+ problem_size.k(), alpha, beta)
1275
+ .run(grid, block);
1276
+ }
1277
+
1278
+ ////////////////////////////////////////////////////////////////////////////////
1279
+
1280
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x64x128_16x32x128_8x8x32) {
1281
+ using ElementA = cutlass::uint4b_t;
1282
+ using LayoutA = cutlass::layout::RowMajor;
1283
+ using ElementB = cutlass::uint4b_t;
1284
+ using LayoutB = cutlass::layout::ColumnMajor;
1285
+ using ElementC = int32_t;
1286
+ using LayoutC = cutlass::layout::ColumnMajor;
1287
+
1288
+ cutlass::gemm::GemmCoord problem_size(32, 64, 512);
1289
+
1290
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 128>;
1291
+ using WarpShape = cutlass::gemm::GemmShape<16, 32, 128>;
1292
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1293
+
1294
+ float alpha = 1.f;
1295
+ float beta = 0.f;
1296
+
1297
+ // Define the MmaCore components
1298
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1299
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1300
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1301
+
1302
+ dim3 grid(1, 1);
1303
+ dim3 block(32, 4, 1);
1304
+
1305
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1306
+ problem_size.k(), alpha, beta)
1307
+ .run(grid, block);
1308
+ }
1309
+
1310
+ ////////////////////////////////////////////////////////////////////////////////
1311
+
1312
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x128_32x32x128_8x8x32) {
1313
+ using ElementA = cutlass::uint4b_t;
1314
+ using LayoutA = cutlass::layout::RowMajor;
1315
+ using ElementB = cutlass::uint4b_t;
1316
+ using LayoutB = cutlass::layout::ColumnMajor;
1317
+ using ElementC = int32_t;
1318
+ using LayoutC = cutlass::layout::ColumnMajor;
1319
+
1320
+ cutlass::gemm::GemmCoord problem_size(64, 64, 512);
1321
+
1322
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>;
1323
+ using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>;
1324
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1325
+
1326
+ float alpha = 1.f;
1327
+ float beta = 0.f;
1328
+
1329
+ // Define the MmaCore components
1330
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1331
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1332
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1333
+
1334
+ dim3 grid(1, 1);
1335
+ dim3 block(32, 4, 1);
1336
+
1337
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1338
+ problem_size.k(), alpha, beta)
1339
+ .run(grid, block);
1340
+ }
1341
+
1342
+ ////////////////////////////////////////////////////////////////////////////////
1343
+
1344
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x64x128_64x32x128_8x8x32) {
1345
+ using ElementA = cutlass::uint4b_t;
1346
+ using LayoutA = cutlass::layout::RowMajor;
1347
+ using ElementB = cutlass::uint4b_t;
1348
+ using LayoutB = cutlass::layout::ColumnMajor;
1349
+ using ElementC = int32_t;
1350
+ using LayoutC = cutlass::layout::ColumnMajor;
1351
+
1352
+ cutlass::gemm::GemmCoord problem_size(128, 64, 512);
1353
+
1354
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 128>;
1355
+ using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>;
1356
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1357
+
1358
+ float alpha = 1.f;
1359
+ float beta = 0.f;
1360
+
1361
+ // Define the MmaCore components
1362
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1363
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1364
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1365
+
1366
+ dim3 grid(1, 1);
1367
+ dim3 block(32, 4, 1);
1368
+
1369
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1370
+ problem_size.k(), alpha, beta)
1371
+ .run(grid, block);
1372
+ }
1373
+
1374
+ ////////////////////////////////////////////////////////////////////////////////
1375
+
1376
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x128x128_32x64x128_8x8x32) {
1377
+ using ElementA = cutlass::uint4b_t;
1378
+ using LayoutA = cutlass::layout::RowMajor;
1379
+ using ElementB = cutlass::uint4b_t;
1380
+ using LayoutB = cutlass::layout::ColumnMajor;
1381
+ using ElementC = int32_t;
1382
+ using LayoutC = cutlass::layout::ColumnMajor;
1383
+
1384
+ cutlass::gemm::GemmCoord problem_size(64, 128, 512);
1385
+
1386
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 128>;
1387
+ using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>;
1388
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1389
+
1390
+ float alpha = 1.f;
1391
+ float beta = 0.f;
1392
+
1393
+ // Define the MmaCore components
1394
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1395
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1396
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1397
+
1398
+ dim3 grid(1, 1);
1399
+ dim3 block(32, 4, 1);
1400
+
1401
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1402
+ problem_size.k(), alpha, beta)
1403
+ .run(grid, block);
1404
+ }
1405
+
1406
+ ////////////////////////////////////////////////////////////////////////////////
1407
+
1408
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x128x128_64x64x128_8x8x32) {
1409
+ using ElementA = cutlass::uint4b_t;
1410
+ using LayoutA = cutlass::layout::RowMajor;
1411
+ using ElementB = cutlass::uint4b_t;
1412
+ using LayoutB = cutlass::layout::ColumnMajor;
1413
+ using ElementC = int32_t;
1414
+ using LayoutC = cutlass::layout::ColumnMajor;
1415
+
1416
+ cutlass::gemm::GemmCoord problem_size(128, 128, 512);
1417
+
1418
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 128>;
1419
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
1420
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1421
+
1422
+ float alpha = 1.f;
1423
+ float beta = 0.f;
1424
+
1425
+ // Define the MmaCore components
1426
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1427
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1428
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1429
+
1430
+ dim3 grid(1, 1);
1431
+ dim3 block(32, 4, 1);
1432
+
1433
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1434
+ problem_size.k(), alpha, beta)
1435
+ .run(grid, block);
1436
+ }
1437
+
1438
+ ////////////////////////////////////////////////////////////////////////////////
1439
+
1440
+ TEST(SM75_gemm_threadblock_crosswise,
1441
+ multicta_256x256x384_128x128x128_64x64x128_8x8x32) {
1442
+ using ElementA = cutlass::uint4b_t;
1443
+ using LayoutA = cutlass::layout::RowMajor;
1444
+ using ElementB = cutlass::uint4b_t;
1445
+ using LayoutB = cutlass::layout::ColumnMajor;
1446
+ using ElementC = int32_t;
1447
+ using LayoutC = cutlass::layout::ColumnMajor;
1448
+
1449
+ cutlass::gemm::GemmCoord problem_size(256, 256, 384);
1450
+
1451
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 128>;
1452
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
1453
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1454
+
1455
+ float alpha = 1.f;
1456
+ float beta = 0.f;
1457
+
1458
+ // Define the MmaCore components
1459
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1460
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1461
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1462
+
1463
+ dim3 grid(2, 2);
1464
+ dim3 block(32, 4, 1);
1465
+
1466
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1467
+ problem_size.k(), alpha, beta)
1468
+ .run(grid, block);
1469
+ }
1470
+
1471
+ ////////////////////////////////////////////////////////////////////////////////
1472
+
1473
+ TEST(SM75_gemm_threadblock_crosswise,
1474
+ multicta_512x256x1536_256x128x128_64x64x128_8x8x32) {
1475
+ using ElementA = cutlass::uint4b_t;
1476
+ using LayoutA = cutlass::layout::RowMajor;
1477
+ using ElementB = cutlass::uint4b_t;
1478
+ using LayoutB = cutlass::layout::ColumnMajor;
1479
+ using ElementC = int32_t;
1480
+ using LayoutC = cutlass::layout::ColumnMajor;
1481
+
1482
+ cutlass::gemm::GemmCoord problem_size(512, 256, 1536);
1483
+
1484
+ using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 128>;
1485
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
1486
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1487
+
1488
+ float alpha = 1.f;
1489
+ float beta = 0.f;
1490
+
1491
+ // Define the MmaCore components
1492
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1493
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1494
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1495
+
1496
+ dim3 grid(2, 2);
1497
+ dim3 block(32, 8, 1);
1498
+
1499
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1500
+ problem_size.k(), alpha, beta)
1501
+ .run(grid, block);
1502
+ }
1503
+
1504
+ ////////////////////////////////////////////////////////////////////////////////
1505
+
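+ // The final interleaved tests pair cutlass::uint4b_t operands with 64-element
+ // interleaved layouts (ColumnMajorInterleaved<64> / RowMajorInterleaved<64>).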
1506
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_32x32x128_16x16x128_8x8x32) {
1507
+ using ElementA = cutlass::uint4b_t;
1508
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1509
+ using ElementB = cutlass::uint4b_t;
1510
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1511
+ using ElementC = int32_t;
1512
+ using LayoutC = cutlass::layout::ColumnMajor;
1513
+
1514
+ cutlass::gemm::GemmCoord problem_size(32, 32, 512);
1515
+
1516
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 128>;
1517
+ using WarpShape = cutlass::gemm::GemmShape<16, 16, 128>;
1518
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1519
+
1520
+ float alpha = 1.f;
1521
+ float beta = 0.f;
1522
+
1523
+ // Define the MmaCore components
1524
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1525
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1526
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1527
+
1528
+ dim3 grid(1, 1);
1529
+ dim3 block(32, 4, 1);
1530
+
1531
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1532
+ problem_size.k(), alpha, beta)
1533
+ .run(grid, block);
1534
+ }
1535
+
1536
+ ////////////////////////////////////////////////////////////////////////////////
1537
+
1538
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x32x128_32x16x128_8x8x32) {
1539
+ using ElementA = cutlass::uint4b_t;
1540
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1541
+ using ElementB = cutlass::uint4b_t;
1542
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1543
+ using ElementC = int32_t;
1544
+ using LayoutC = cutlass::layout::ColumnMajor;
1545
+
1546
+ cutlass::gemm::GemmCoord problem_size(64, 32, 512);
1547
+
1548
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 128>;
1549
+ using WarpShape = cutlass::gemm::GemmShape<32, 16, 128>;
1550
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1551
+
1552
+ float alpha = 1.f;
1553
+ float beta = 0.f;
1554
+
1555
+ // Define the MmaCore components
1556
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1557
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1558
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1559
+
1560
+ dim3 grid(1, 1);
1561
+ dim3 block(32, 4, 1);
1562
+
1563
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1564
+ problem_size.k(), alpha, beta)
1565
+ .run(grid, block);
1566
+ }
1567
+
1568
+ ////////////////////////////////////////////////////////////////////////////////
1569
+
1570
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_32x64x128_16x32x128_8x8x32) {
1571
+ using ElementA = cutlass::uint4b_t;
1572
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1573
+ using ElementB = cutlass::uint4b_t;
1574
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1575
+ using ElementC = int32_t;
1576
+ using LayoutC = cutlass::layout::ColumnMajor;
1577
+
1578
+ cutlass::gemm::GemmCoord problem_size(32, 64, 512);
1579
+
1580
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 128>;
1581
+ using WarpShape = cutlass::gemm::GemmShape<16, 32, 128>;
1582
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1583
+
1584
+ float alpha = 1.f;
1585
+ float beta = 0.f;
1586
+
1587
+ // Define the MmaCore components
1588
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1589
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1590
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1591
+
1592
+ dim3 grid(1, 1);
1593
+ dim3 block(32, 4, 1);
1594
+
1595
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1596
+ problem_size.k(), alpha, beta)
1597
+ .run(grid, block);
1598
+ }
1599
+
1600
+ ////////////////////////////////////////////////////////////////////////////////
1601
+
1602
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x64x128_32x32x128_8x8x32) {
1603
+ using ElementA = cutlass::uint4b_t;
1604
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1605
+ using ElementB = cutlass::uint4b_t;
1606
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1607
+ using ElementC = int32_t;
1608
+ using LayoutC = cutlass::layout::ColumnMajor;
1609
+
1610
+ cutlass::gemm::GemmCoord problem_size(64, 64, 512);
1611
+
1612
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 128>;
1613
+ using WarpShape = cutlass::gemm::GemmShape<32, 32, 128>;
1614
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1615
+
1616
+ float alpha = 1.f;
1617
+ float beta = 0.f;
1618
+
1619
+ // Define the MmaCore components
1620
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1621
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1622
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1623
+
1624
+ dim3 grid(1, 1);
1625
+ dim3 block(32, 4, 1);
1626
+
1627
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1628
+ problem_size.k(), alpha, beta)
1629
+ .run(grid, block);
1630
+ }
1631
+
1632
+ ////////////////////////////////////////////////////////////////////////////////
1633
+
1634
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_128x64x128_64x32x128_8x8x32) {
1635
+ using ElementA = cutlass::uint4b_t;
1636
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1637
+ using ElementB = cutlass::uint4b_t;
1638
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1639
+ using ElementC = int32_t;
1640
+ using LayoutC = cutlass::layout::ColumnMajor;
1641
+
1642
+ cutlass::gemm::GemmCoord problem_size(128, 64, 512);
1643
+
1644
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 128>;
1645
+ using WarpShape = cutlass::gemm::GemmShape<64, 32, 128>;
1646
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1647
+
1648
+ float alpha = 1.f;
1649
+ float beta = 0.f;
1650
+
1651
+ // Define the MmaCore components
1652
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1653
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1654
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1655
+
1656
+ dim3 grid(1, 1);
1657
+ dim3 block(32, 4, 1);
1658
+
1659
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1660
+ problem_size.k(), alpha, beta)
1661
+ .run(grid, block);
1662
+ }
1663
+
1664
+ ////////////////////////////////////////////////////////////////////////////////
1665
+
1666
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_64x128x128_32x64x128_8x8x32) {
1667
+ using ElementA = cutlass::uint4b_t;
1668
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1669
+ using ElementB = cutlass::uint4b_t;
1670
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1671
+ using ElementC = int32_t;
1672
+ using LayoutC = cutlass::layout::ColumnMajor;
1673
+
1674
+ cutlass::gemm::GemmCoord problem_size(64, 128, 512);
1675
+
1676
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 128>;
1677
+ using WarpShape = cutlass::gemm::GemmShape<32, 64, 128>;
1678
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1679
+
1680
+ float alpha = 1.f;
1681
+ float beta = 0.f;
1682
+
1683
+ // Define the MmaCore components
1684
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1685
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1686
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1687
+
1688
+ dim3 grid(1, 1);
1689
+ dim3 block(32, 4, 1);
1690
+
1691
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1692
+ problem_size.k(), alpha, beta)
1693
+ .run(grid, block);
1694
+ }
1695
+
1696
+ ////////////////////////////////////////////////////////////////////////////////
1697
+
1698
+ TEST(SM75_gemm_threadblock_interleaved, tensor_op_128x128x128_64x64x128_8x8x32) {
1699
+ using ElementA = cutlass::uint4b_t;
1700
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1701
+ using ElementB = cutlass::uint4b_t;
1702
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1703
+ using ElementC = int32_t;
1704
+ using LayoutC = cutlass::layout::ColumnMajor;
1705
+
1706
+ cutlass::gemm::GemmCoord problem_size(128, 128, 512);
1707
+
1708
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 128>;
1709
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
1710
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1711
+
1712
+ float alpha = 1.f;
1713
+ float beta = 0.f;
1714
+
1715
+ // Define the MmaCore components
1716
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1717
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1718
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1719
+
1720
+ dim3 grid(1, 1);
1721
+ dim3 block(32, 4, 1);
1722
+
1723
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1724
+ problem_size.k(), alpha, beta)
1725
+ .run(grid, block);
1726
+ }
1727
+
1728
+ ////////////////////////////////////////////////////////////////////////////////
1729
+
1730
+ TEST(SM75_gemm_threadblock_interleaved,
1731
+ multicta_256x256x384_128x128x128_64x64x128_8x8x32) {
1732
+ using ElementA = cutlass::uint4b_t;
1733
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1734
+ using ElementB = cutlass::uint4b_t;
1735
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1736
+ using ElementC = int32_t;
1737
+ using LayoutC = cutlass::layout::ColumnMajor;
1738
+
1739
+ cutlass::gemm::GemmCoord problem_size(256, 256, 384);
1740
+
1741
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 128>;
1742
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
1743
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1744
+
1745
+ float alpha = 1.f;
1746
+ float beta = 0.f;
1747
+
1748
+ // Define the MmaCore components
1749
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1750
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1751
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1752
+
1753
+ dim3 grid(2, 2);
1754
+ dim3 block(32, 4, 1);
1755
+
1756
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1757
+ problem_size.k(), alpha, beta)
1758
+ .run(grid, block);
1759
+ }
1760
+
1761
+ ////////////////////////////////////////////////////////////////////////////////
1762
+
1763
+ TEST(SM75_gemm_threadblock_interleaved,
1764
+ multicta_512x256x1536_256x128x128_64x64x128_8x8x32) {
1765
+ using ElementA = cutlass::uint4b_t;
1766
+ using LayoutA = cutlass::layout::ColumnMajorInterleaved<64>;
1767
+ using ElementB = cutlass::uint4b_t;
1768
+ using LayoutB = cutlass::layout::RowMajorInterleaved<64>;
1769
+ using ElementC = int32_t;
1770
+ using LayoutC = cutlass::layout::ColumnMajor;
1771
+
1772
+ cutlass::gemm::GemmCoord problem_size(512, 256, 1536);
1773
+
1774
+ using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 128>;
1775
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 128>;
1776
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>;
1777
+
1778
+ float alpha = 1.f;
1779
+ float beta = 0.f;
1780
+
1781
+ // Define the MmaCore components
1782
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1783
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1784
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp>;
1785
+
1786
+ dim3 grid(2, 2);
1787
+ dim3 block(32, 8, 1);
1788
+
1789
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1790
+ problem_size.k(), alpha, beta)
1791
+ .run(grid, block);
1792
+ }
1793
+
1794
+ ////////////////////////////////////////////////////////////////////////////////
1795
+
1796
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x512_64x64x512_8x8x128) {
1797
+ using ElementA = cutlass::uint1b_t;
1798
+ using LayoutA = cutlass::layout::RowMajor;
1799
+ using ElementB = cutlass::uint1b_t;
1800
+ using LayoutB = cutlass::layout::ColumnMajor;
1801
+ using ElementC = int32_t;
1802
+ using LayoutC = cutlass::layout::ColumnMajor;
1803
+
1804
+ cutlass::gemm::GemmCoord problem_size(64, 64, 2048);
1805
+
1806
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>;
1807
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
1808
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
1809
+
1810
+ float alpha = 1.f;
1811
+ float beta = 0.f;
1812
+
1813
+ // Define the MmaCore components
1814
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1815
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1816
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
1817
+ cutlass::arch::OpXorPopc>;
1818
+
1819
+ dim3 grid(1, 1);
1820
+ dim3 block(32, 1, 1);
1821
+
1822
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1823
+ problem_size.k(), alpha, beta)
1824
+ .run(grid, block);
1825
+ }
1826
+
1827
+ ////////////////////////////////////////////////////////////////////////////////
1828
+
1829
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x32x512_16x16x512_8x8x128) {
1830
+ using ElementA = cutlass::uint1b_t;
1831
+ using LayoutA = cutlass::layout::RowMajor;
1832
+ using ElementB = cutlass::uint1b_t;
1833
+ using LayoutB = cutlass::layout::ColumnMajor;
1834
+ using ElementC = int32_t;
1835
+ using LayoutC = cutlass::layout::ColumnMajor;
1836
+
1837
+ cutlass::gemm::GemmCoord problem_size(32, 32, 2048);
1838
+
1839
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 512>;
1840
+ using WarpShape = cutlass::gemm::GemmShape<16, 16, 512>;
1841
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
1842
+
1843
+ float alpha = 1.f;
1844
+ float beta = 0.f;
1845
+
1846
+ // Define the MmaCore components
1847
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1848
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1849
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
1850
+ cutlass::arch::OpXorPopc>;
1851
+
1852
+ dim3 grid(1, 1);
1853
+ dim3 block(32, 4, 1);
1854
+
1855
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1856
+ problem_size.k(), alpha, beta)
1857
+ .run(grid, block);
1858
+ }
1859
+
1860
+ ////////////////////////////////////////////////////////////////////////////////
1861
+
1862
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x32x512_32x16x512_8x8x128) {
1863
+ using ElementA = cutlass::uint1b_t;
1864
+ using LayoutA = cutlass::layout::RowMajor;
1865
+ using ElementB = cutlass::uint1b_t;
1866
+ using LayoutB = cutlass::layout::ColumnMajor;
1867
+ using ElementC = int32_t;
1868
+ using LayoutC = cutlass::layout::ColumnMajor;
1869
+
1870
+ cutlass::gemm::GemmCoord problem_size(64, 32, 2048);
1871
+
1872
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 512>;
1873
+ using WarpShape = cutlass::gemm::GemmShape<32, 16, 512>;
1874
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
1875
+
1876
+ float alpha = 1.f;
1877
+ float beta = 0.f;
1878
+
1879
+ // Define the MmaCore components
1880
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1881
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1882
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
1883
+ cutlass::arch::OpXorPopc>;
1884
+
1885
+ dim3 grid(1, 1);
1886
+ dim3 block(32, 4, 1);
1887
+
1888
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1889
+ problem_size.k(), alpha, beta)
1890
+ .run(grid, block);
1891
+ }
1892
+
1893
+ ////////////////////////////////////////////////////////////////////////////////
1894
+
1895
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_32x64x512_16x32x512_8x8x128) {
1896
+ using ElementA = cutlass::uint1b_t;
1897
+ using LayoutA = cutlass::layout::RowMajor;
1898
+ using ElementB = cutlass::uint1b_t;
1899
+ using LayoutB = cutlass::layout::ColumnMajor;
1900
+ using ElementC = int32_t;
1901
+ using LayoutC = cutlass::layout::ColumnMajor;
1902
+
1903
+ cutlass::gemm::GemmCoord problem_size(32, 64, 2048);
1904
+
1905
+ using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 512>;
1906
+ using WarpShape = cutlass::gemm::GemmShape<16, 32, 512>;
1907
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
1908
+
1909
+ float alpha = 1.f;
1910
+ float beta = 0.f;
1911
+
1912
+ // Define the MmaCore components
1913
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1914
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1915
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
1916
+ cutlass::arch::OpXorPopc>;
1917
+
1918
+ dim3 grid(1, 1);
1919
+ dim3 block(32, 4, 1);
1920
+
1921
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1922
+ problem_size.k(), alpha, beta)
1923
+ .run(grid, block);
1924
+ }
1925
+
1926
+ ////////////////////////////////////////////////////////////////////////////////
1927
+
1928
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x64x512_32x32x512_8x8x128) {
1929
+ using ElementA = cutlass::uint1b_t;
1930
+ using LayoutA = cutlass::layout::RowMajor;
1931
+ using ElementB = cutlass::uint1b_t;
1932
+ using LayoutB = cutlass::layout::ColumnMajor;
1933
+ using ElementC = int32_t;
1934
+ using LayoutC = cutlass::layout::ColumnMajor;
1935
+
1936
+ cutlass::gemm::GemmCoord problem_size(64, 64, 2048);
1937
+
1938
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 512>;
1939
+ using WarpShape = cutlass::gemm::GemmShape<32, 32, 512>;
1940
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
1941
+
1942
+ float alpha = 1.f;
1943
+ float beta = 0.f;
1944
+
1945
+ // Define the MmaCore components
1946
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1947
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1948
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
1949
+ cutlass::arch::OpXorPopc>;
1950
+
1951
+ dim3 grid(1, 1);
1952
+ dim3 block(32, 4, 1);
1953
+
1954
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1955
+ problem_size.k(), alpha, beta)
1956
+ .run(grid, block);
1957
+ }
1958
+
1959
+ ////////////////////////////////////////////////////////////////////////////////
1960
+
1961
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x64x512_64x32x512_8x8x128) {
1962
+ using ElementA = cutlass::uint1b_t;
1963
+ using LayoutA = cutlass::layout::RowMajor;
1964
+ using ElementB = cutlass::uint1b_t;
1965
+ using LayoutB = cutlass::layout::ColumnMajor;
1966
+ using ElementC = int32_t;
1967
+ using LayoutC = cutlass::layout::ColumnMajor;
1968
+
1969
+ cutlass::gemm::GemmCoord problem_size(128, 64, 2048);
1970
+
1971
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 512>;
1972
+ using WarpShape = cutlass::gemm::GemmShape<64, 32, 512>;
1973
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
1974
+
1975
+ float alpha = 1.f;
1976
+ float beta = 0.f;
1977
+
1978
+ // Define the MmaCore components
1979
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
1980
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
1981
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
1982
+ cutlass::arch::OpXorPopc>;
1983
+
1984
+ dim3 grid(1, 1);
1985
+ dim3 block(32, 4, 1);
1986
+
1987
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
1988
+ problem_size.k(), alpha, beta)
1989
+ .run(grid, block);
1990
+ }
1991
+
1992
+ ////////////////////////////////////////////////////////////////////////////////
1993
+
1994
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_64x128x512_32x64x512_8x8x128) {
1995
+ using ElementA = cutlass::uint1b_t;
1996
+ using LayoutA = cutlass::layout::RowMajor;
1997
+ using ElementB = cutlass::uint1b_t;
1998
+ using LayoutB = cutlass::layout::ColumnMajor;
1999
+ using ElementC = int32_t;
2000
+ using LayoutC = cutlass::layout::ColumnMajor;
2001
+
2002
+ cutlass::gemm::GemmCoord problem_size(64, 128, 2048);
2003
+
2004
+ using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 512>;
2005
+ using WarpShape = cutlass::gemm::GemmShape<32, 64, 512>;
2006
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
2007
+
2008
+ float alpha = 1.f;
2009
+ float beta = 0.f;
2010
+
2011
+ // Define the MmaCore components
2012
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
2013
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
2014
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
2015
+ cutlass::arch::OpXorPopc>;
2016
+
2017
+ dim3 grid(1, 1);
2018
+ dim3 block(32, 4, 1);
2019
+
2020
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
2021
+ problem_size.k(), alpha, beta)
2022
+ .run(grid, block);
2023
+ }
2024
+
2025
+ ////////////////////////////////////////////////////////////////////////////////
2026
+
2027
+ TEST(SM75_gemm_threadblock_crosswise, tensor_op_128x128x512_64x64x512_8x8x128) {
2028
+ using ElementA = cutlass::uint1b_t;
2029
+ using LayoutA = cutlass::layout::RowMajor;
2030
+ using ElementB = cutlass::uint1b_t;
2031
+ using LayoutB = cutlass::layout::ColumnMajor;
2032
+ using ElementC = int32_t;
2033
+ using LayoutC = cutlass::layout::ColumnMajor;
2034
+
2035
+ cutlass::gemm::GemmCoord problem_size(128, 128, 2048);
2036
+
2037
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 512>;
2038
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
2039
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
2040
+
2041
+ float alpha = 1.f;
2042
+ float beta = 0.f;
2043
+
2044
+ // Define the MmaCore components
2045
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
2046
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
2047
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
2048
+ cutlass::arch::OpXorPopc>;
2049
+
2050
+ dim3 grid(1, 1);
2051
+ dim3 block(32, 4, 1);
2052
+
2053
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
2054
+ problem_size.k(), alpha, beta)
2055
+ .run(grid, block);
2056
+ }
2057
+
2058
+ ////////////////////////////////////////////////////////////////////////////////
2059
+
2060
+ TEST(SM75_gemm_threadblock_crosswise,
2061
+ multicta_256x256x1536_128x128x512_64x64x512_8x8x128) {
2062
+ using ElementA = cutlass::uint1b_t;
2063
+ using LayoutA = cutlass::layout::RowMajor;
2064
+ using ElementB = cutlass::uint1b_t;
2065
+ using LayoutB = cutlass::layout::ColumnMajor;
2066
+ using ElementC = int32_t;
2067
+ using LayoutC = cutlass::layout::ColumnMajor;
2068
+
2069
+ cutlass::gemm::GemmCoord problem_size(256, 256, 1536);
2070
+
2071
+ using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 512>;
2072
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
2073
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
2074
+
2075
+ float alpha = 1.f;
2076
+ float beta = 0.f;
2077
+
2078
+ // Define the MmaCore components
2079
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
2080
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
2081
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
2082
+ cutlass::arch::OpXorPopc>;
2083
+
2084
+ dim3 grid(2, 2);
2085
+ dim3 block(32, 4, 1);
2086
+
2087
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
2088
+ problem_size.k(), alpha, beta)
2089
+ .run(grid, block);
2090
+ }
2091
+
2092
+ ////////////////////////////////////////////////////////////////////////////////
2093
+
2094
+ TEST(SM75_gemm_threadblock_crosswise,
2095
+ multicta_512x256x6144_256x128x512_64x64x512_8x8x128) {
2096
+ using ElementA = cutlass::uint1b_t;
2097
+ using LayoutA = cutlass::layout::RowMajor;
2098
+ using ElementB = cutlass::uint1b_t;
2099
+ using LayoutB = cutlass::layout::ColumnMajor;
2100
+ using ElementC = int32_t;
2101
+ using LayoutC = cutlass::layout::ColumnMajor;
2102
+
2103
+ cutlass::gemm::GemmCoord problem_size(512, 256, 6144);
2104
+
2105
+ using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 512>;
2106
+ using WarpShape = cutlass::gemm::GemmShape<64, 64, 512>;
2107
+ using InstructionShape = cutlass::gemm::GemmShape<8, 8, 128>;
2108
+
2109
+ float alpha = 1.f;
2110
+ float beta = 0.f;
2111
+
2112
+ // Define the MmaCore components
2113
+ using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore<
2114
+ ThreadBlockShape, WarpShape, InstructionShape, ElementA, LayoutA,
2115
+ ElementB, LayoutB, ElementC, LayoutC, cutlass::arch::OpClassTensorOp, 2,
2116
+ cutlass::arch::OpXorPopc>;
2117
+
2118
+ dim3 grid(2, 2);
2119
+ dim3 block(32, 8, 1);
2120
+
2121
+ test::gemm::threadblock::Testbed<MmaCore>(problem_size.m(), problem_size.n(),
2122
+ problem_size.k(), alpha, beta)
2123
+ .run(grid, block);
2124
+ }
2125
+
2126
+ ////////////////////////////////////////////////////////////////////////////////
2127
+
2128
+ #endif
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/conv3d_operation_profiler.h ADDED
@@ -0,0 +1,449 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Defines profiling functionality for convolution
33
+
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include <vector>
39
+ #include <string>
40
+ #include <memory>
41
+ #include <algorithm>
42
+ #include <unordered_map>
43
+
44
+ // CUTLASS Library includes
45
+ #include "cutlass/library/library.h"
46
+ #include "cutlass/library/util.h"
47
+ #include "cutlass/library/handle.h"
48
+ #include "cutlass/library/manifest.h"
49
+ #include "cutlass/library/singleton.h"
50
+
51
+ // Profiler includes
52
+ #include "options.h"
53
+ #include "device_context.h"
54
+ #include "operation_profiler.h"
55
+ #include "performance_result.h"
56
+ #include "problem_space.h"
57
+ #include "reduction_operation_profiler.h"
58
+ #if CUTLASS_ENABLE_CUDNN
59
+ #include "cudnn_helpers.h"
60
+ #endif //#if CUTLASS_ENABLE_CUDNN
61
+ #include "debug.h"
62
+
63
+ /////////////////////////////////////////////////////////////////////////////////////////////////
64
+
65
+ namespace cutlass {
66
+ namespace profiler {
67
+
68
+ /////////////////////////////////////////////////////////////////////////////////////////////////
69
+
70
+ /// Abstract base class for each math function
71
+ class Conv3dOperationProfiler : public OperationProfiler {
72
+ public:
73
+
74
+ /// Problem structure obtained from problem space
75
+ struct Conv3dProblem {
76
+
77
+ int64_t n, d, h, w, c, z, p, q, k, t, r, s;
78
+ int64_t pad_d, pad_h, pad_w;
79
+ int64_t stride_d, stride_h, stride_w;
80
+ int64_t dilation_d, dilation_h, dilation_w;
81
+
82
+ std::vector<uint8_t> alpha;
83
+ std::vector<uint8_t> beta;
84
+
85
+ library::SplitKMode split_k_mode;
86
+ int64_t split_k_slices;
87
+
88
+ library::ConvModeID conv_mode;
89
+
90
+ library::Provider eq_gemm_provider;
91
+
92
+ // convolution with parallel interleaved reduction
93
+ // convolution epilogue (alpha, beta) = (1.0, 0.0)
94
+ // reduction epilogue (alpha, beta) = (Conv3dProblem::alpha, Conv3dProblem::beta)
95
+ std::vector<uint8_t> alpha_one;
96
+ std::vector<uint8_t> beta_zero;
97
+
98
+ //
99
+ // Methods
100
+ //
101
+
102
+ /// Total number of bytes loaded
103
+ int64_t bytes(library::ConvDescription const &operation_desc) const;
104
+
105
+ /// Total number of flops computed
106
+ int64_t flops(library::ConvDescription const &operation_desc) const;
107
+
108
+ /// Infers output size from the input size, padding, stride, and dilation
109
+ void set_default_output_size() {
110
+ z = ((d + pad_d - t * dilation_d) / stride_d) + 1;
111
+ p = ((h + pad_h - r * dilation_h) / stride_h) + 1;
112
+ q = ((w + pad_w - s * dilation_w) / stride_w) + 1;
113
+ }
114
+
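A worked check of the formula as written, with hypothetical sizes (and assuming pad_d here denotes the total padding along D, which is how the expression reads):

    // d = 8, pad_d = 0, t = 3, dilation_d = 1, stride_d = 1
    // z = ((8 + 0 - 3 * 1) / 1) + 1 = 6      (the familiar "valid" output size d - t + 1)
    // with stride_d = 2: z = ((8 + 0 - 3) / 2) + 1 = 3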
115
+ // Returns equivalent gemm problem size for convolution
116
+ cutlass::gemm::GemmCoord eq_gemm_size(library::ConvKind const &conv_kind) const {
117
+
118
+ switch (conv_kind) {
119
+ case library::ConvKind::kFprop: return cutlass::gemm::GemmCoord(int(n * z * p * q), int(k), int(t * r * s * c));
120
+ case library::ConvKind::kDgrad: return cutlass::gemm::GemmCoord(int(n * d * h * w), int(c), int(t * r * s * k));
121
+ case library::ConvKind::kWgrad: return cutlass::gemm::GemmCoord(int(k), int(t * r * s * c), int(n * z * p * q));
122
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
123
+ }
124
+ }
125
+
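For illustration, a hypothetical fprop problem and the implicit-GEMM extents it maps to under eq_gemm_size() above (sizes invented for the example):

    // n = 2, z = p = q = 8, k = 64, t = r = s = 3, c = 32
    // kFprop -> GemmCoord(m = n*z*p*q = 2*8*8*8 = 1024,
    //                     n = k       = 64,
    //                     k = t*r*s*c = 3*3*3*32 = 864)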
126
+ // Returns extent for tensor A
127
+ std::vector<int> extent_a(library::ConvKind const &conv_kind) const {
128
+
129
+ switch (conv_kind) {
130
+ case library::ConvKind::kFprop: return {int(n), int(d), int(h), int(w), int(c)};
131
+ case library::ConvKind::kDgrad: return {int(n), int(z), int(p), int(q), int(k)};
132
+ case library::ConvKind::kWgrad: return {int(n), int(z), int(p), int(q), int(k)};
133
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
134
+ }
135
+ }
136
+
137
+ // Returns extent for tensor B
138
+ std::vector<int> extent_b(library::ConvKind const &conv_kind) const {
139
+
140
+ switch (conv_kind) {
141
+ case library::ConvKind::kFprop: return {int(k), int(t), int(r), int(s), int(c)};
142
+ case library::ConvKind::kDgrad: return {int(k), int(t), int(r), int(s), int(c)};
143
+ case library::ConvKind::kWgrad: return {int(n), int(d), int(h), int(w), int(c)};
144
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
145
+ }
146
+ }
147
+
148
+ // Returns extent for tensor C
149
+ std::vector<int> extent_c(library::ConvKind const &conv_kind) const {
150
+
151
+ switch (conv_kind) {
152
+ case library::ConvKind::kFprop: return {int(n), int(z), int(p), int(q), int(k)};
153
+ case library::ConvKind::kDgrad: return {int(n), int(d), int(h), int(w), int(c)};
154
+ case library::ConvKind::kWgrad: return {int(k), int(t), int(r), int(s), int(c)};
155
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
156
+ }
157
+ }
158
+
159
+ // Returns layout for equivalent gemm matrix A
160
+ library::LayoutTypeID eq_gemm_layout_a(library::ConvKind const &conv_kind) const {
161
+
162
+ switch (conv_kind) {
163
+ case library::ConvKind::kFprop: return library::LayoutTypeID::kRowMajor; // TN Gemm
164
+ case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm
165
+ case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor; // NT Gemm
166
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
167
+ }
168
+ }
169
+
170
+ // Returns layout for equivalent gemm matrix B
171
+ library::LayoutTypeID eq_gemm_layout_b(library::ConvKind const &conv_kind) const {
172
+
173
+ switch (conv_kind) {
174
+ case library::ConvKind::kFprop: return library::LayoutTypeID::kColumnMajor; // TN Gemm
175
+ case library::ConvKind::kDgrad: return library::LayoutTypeID::kRowMajor; // TT Gemm
176
+ case library::ConvKind::kWgrad: return library::LayoutTypeID::kRowMajor; // NT Gemm
177
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
178
+ }
179
+ }
180
+
181
+ // Returns layout for equivalent gemm matrix C
182
+ library::LayoutTypeID eq_gemm_layout_c(library::ConvKind const &conv_kind) const {
183
+
184
+ switch (conv_kind) {
185
+ // Gemm operator assumes column-major output
186
+ case library::ConvKind::kFprop:
187
+ case library::ConvKind::kDgrad:
188
+ case library::ConvKind::kWgrad: return library::LayoutTypeID::kColumnMajor;
189
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
190
+ }
191
+ }
192
+
193
+ // Returns leading dimension for equivalent gemm matrix A
194
+ int64_t eq_gemm_lda(library::ConvKind const &conv_kind) const {
195
+
196
+ switch (conv_kind) {
197
+ case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k();
198
+ case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).k();
199
+ case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m();
200
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
201
+ }
202
+ }
203
+
204
+ // Returns leading dimension for equivalent gemm matrix B
205
+ int64_t eq_gemm_ldb(library::ConvKind const &conv_kind) const {
206
+
207
+ switch (conv_kind) {
208
+ case library::ConvKind::kFprop: return eq_gemm_size(conv_kind).k();
209
+ case library::ConvKind::kDgrad: return eq_gemm_size(conv_kind).n();
210
+ case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).n();
211
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
212
+ }
213
+ }
214
+
215
+ // Returns leading dimension for equivalent gemm matrix C
216
+ int64_t eq_gemm_ldc(library::ConvKind const &conv_kind) const {
217
+
218
+ switch (conv_kind) {
219
+ case library::ConvKind::kFprop:
220
+ case library::ConvKind::kDgrad:
221
+ case library::ConvKind::kWgrad: return eq_gemm_size(conv_kind).m();
222
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
223
+ }
224
+ }
225
+ };
226
+
227
+ /// Workspace used
228
+ struct Conv2dWorkspace {
229
+
230
+ /// Conv device allocations
231
+ DeviceAllocation *A;
232
+ DeviceAllocation *B;
233
+ DeviceAllocation *C;
234
+ DeviceAllocation *Computed;
235
+ DeviceAllocation *Reference;
236
+
237
+ /// Library configuration and arguments for convolution operator
238
+ library::Conv3dConfiguration configuration;
239
+ library::ConvArguments arguments;
240
+
241
+ /// Number of copies of the problem workspace which are visited sequentially during
242
+ /// profiling to avoid camping in the last level cache.
243
+ int problem_count;
244
+
245
+ /// Buffer used for the cutlass conv2d operations' host workspace
246
+ std::vector<uint8_t> host_workspace;
247
+
248
+ /// Buffer used for the cutlass operations' device workspace
249
+ DeviceAllocation device_workspace;
250
+
251
+ /// Library configuration and arguments for reduction operator
252
+ library::ReductionConfiguration reduction_configuration;
253
+ library::ReductionArguments reduction_arguments;
254
+
255
+ /// Buffer used for the cutlass reduction operations' host workspace
256
+ std::vector<uint8_t> reduction_host_workspace;
257
+
258
+ /// Host data buffers for host reference operation
259
+ /// host buffer for tensor a
260
+ std::vector<uint8_t> host_tensor_a;
261
+
262
+ /// host buffer for tensor b
263
+ std::vector<uint8_t> host_tensor_b;
264
+
265
+ /// host buffer for tensor c
266
+ std::vector<uint8_t> host_tensor_c;
267
+
268
+
269
+ //
270
+ // Methods
271
+ //
272
+
273
+ Conv2dWorkspace():
274
+ A(nullptr), B(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) { }
275
+
276
+ // Returns stride vector for tensor A
277
+ std::vector<int64_t> stride_a(library::ConvKind const &conv_kind) {
278
+ return {
279
+ configuration.layout_a(conv_kind).stride()[0],
280
+ configuration.layout_a(conv_kind).stride()[1],
281
+ configuration.layout_a(conv_kind).stride()[2],
282
+ configuration.layout_a(conv_kind).stride()[3]
283
+ };
284
+ }
285
+
286
+ // Returns stride vector for tensor B
287
+ std::vector<int64_t> stride_b(library::ConvKind const &conv_kind) {
288
+
289
+ return {
290
+ configuration.layout_b(conv_kind).stride()[0],
291
+ configuration.layout_b(conv_kind).stride()[1],
292
+ configuration.layout_b(conv_kind).stride()[2],
293
+ configuration.layout_b(conv_kind).stride()[3]
294
+ };
295
+ }
296
+
297
+ // Returns stride vector for tensor C
298
+ std::vector<int64_t> stride_c(library::ConvKind const &conv_kind) {
299
+
300
+ return {
301
+ configuration.layout_c(conv_kind).stride()[0],
302
+ configuration.layout_c(conv_kind).stride()[1],
303
+ configuration.layout_c(conv_kind).stride()[2],
304
+ configuration.layout_c(conv_kind).stride()[3]
305
+ };
306
+ }
307
+ };
308
+
309
+ protected:
310
+
311
+ //
312
+ // Data members
313
+ //
314
+
315
+ /// CONV problem obtained from problem space
316
+ Conv3dProblem problem_;
317
+
318
+ /// Device memory allocations
319
+ Conv2dWorkspace conv_workspace_;
320
+
321
+ /// CUTLASS parallel reduction operation to follow this conv3d operation
322
+ library::Operation const *reduction_op_;
323
+
324
+ public:
325
+ //
326
+ // Methods
327
+ //
328
+
329
+ /// Ctor
330
+ Conv3dOperationProfiler(Options const &options);
331
+
332
+ /// Destructor
333
+ virtual ~Conv3dOperationProfiler();
334
+
335
+ Conv3dProblem const& problem() const { return problem_; }
336
+
337
+ /// Prints usage statement for the math function
338
+ virtual void print_usage(std::ostream &out) const;
339
+
340
+ /// Prints examples
341
+ virtual void print_examples(std::ostream &out) const;
342
+
343
+ /// Extracts the problem dimensions
344
+ virtual Status initialize_configuration(
345
+ Options const &options,
346
+ PerformanceReport &report,
347
+ DeviceContext &device_context,
348
+ library::Operation const *operation,
349
+ ProblemSpace const &problem_space,
350
+ ProblemSpace::Problem const &problem);
351
+
352
+ /// Initializes workspace
353
+ virtual Status initialize_workspace(
354
+ Options const &options,
355
+ PerformanceReport &report,
356
+ DeviceContext &device_context,
357
+ library::Operation const *operation,
358
+ ProblemSpace const &problem_space,
359
+ ProblemSpace::Problem const &problem);
360
+
361
+ /// Verifies CUTLASS against references
362
+ virtual bool verify_cutlass(
363
+ Options const &options,
364
+ PerformanceReport &report,
365
+ DeviceContext &device_context,
366
+ library::Operation const *operation,
367
+ ProblemSpace const &problem_space,
368
+ ProblemSpace::Problem const &problem);
369
+
370
+ /// Measures performance results
371
+ virtual bool profile(
372
+ Options const &options,
373
+ PerformanceReport &report,
374
+ DeviceContext &device_context,
375
+ library::Operation const *operation,
376
+ ProblemSpace const &problem_space,
377
+ ProblemSpace::Problem const &problem);
378
+
379
+ protected:
380
+
381
+ /// Updates the arguments structure for the CUTLASS operator based on
382
+ /// the problem index.
383
+ void set_cutlass_operator_arguments_(int problem_idx = 0);
384
+
385
+ /// Method to profile an initialized CUTLASS operation
386
+ virtual Status profile_cutlass_(
387
+ double &runtime,
388
+ Options const &options,
389
+ library::Operation const *operation,
390
+ void *arguments,
391
+ void *host_workspace,
392
+ void *device_workspace);
393
+
394
+ /// Initialize reduction problem dimensions and library::Operation
395
+ bool initialize_reduction_configuration_(
396
+ Options const &options,
397
+ PerformanceReport &report,
398
+ DeviceContext &device_context,
399
+ library::Operation const *operation,
400
+ ProblemSpace const &problem_space,
401
+ ProblemSpace::Problem const &problem);
402
+
403
+ /// Initializes the performance result
404
+ void initialize_result_(
405
+ PerformanceResult &result,
406
+ Options const &options,
407
+ library::ConvDescription const &operation_desc,
408
+ ProblemSpace const &problem_space);
409
+
410
+ /// Verifies CUTLASS against host reference
411
+ bool verify_with_host_reference_(
412
+ Options const &options,
413
+ PerformanceReport &report,
414
+ DeviceContext &device_context,
415
+ library::Operation const *operation,
416
+ ProblemSpace const &problem_space,
417
+ ProblemSpace::Problem const &problem);
418
+
419
+ /// Verifies CUTLASS against device reference
420
+ bool verify_with_device_reference_(
421
+ Options const &options,
422
+ PerformanceReport &report,
423
+ DeviceContext &device_context,
424
+ library::Operation const *operation,
425
+ ProblemSpace const &problem_space,
426
+ ProblemSpace::Problem const &problem);
427
+
428
+ #if CUTLASS_ENABLE_CUDNN
429
+
430
+ /// Verifies CUTLASS against cudnn reference
431
+ bool verify_with_cudnn_(
432
+ Options const &options,
433
+ PerformanceReport &report,
434
+ DeviceContext &device_context,
435
+ library::Operation const *operation,
436
+ ProblemSpace const &problem_space,
437
+ ProblemSpace::Problem const &problem);
438
+
439
+ #endif //#if CUTLASS_ENABLE_CUDNN
440
+
441
+ };
442
+
443
+ /////////////////////////////////////////////////////////////////////////////////////////////////
444
+
445
+ } // namespace profiler
446
+ } // namespace cutlass
447
+
448
+ /////////////////////////////////////////////////////////////////////////////////////////////////
449
+
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cublas_helpers.h ADDED
@@ -0,0 +1,456 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Helper functions for mapping CUTLASS concepts to cuBLAS.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #if CUTLASS_ENABLE_CUBLAS
38
+ #include <cublas_v2.h>
39
+ #include <cublasLt.h>
40
+
41
+ #include "cutlass/cutlass.h"
42
+ #include "cutlass/library/library.h"
43
+ #include "cutlass/library/util.h"
44
+ #include "cutlass/blas3.h"
45
+
46
+ #include "options.h"
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace cutlass {
51
+ namespace profiler {
52
+
53
+ /////////////////////////////////////////////////////////////////////////////////////////////////
54
+
55
+ /// Converts a cuBLAS status to cutlass::Status
56
+ Status get_cutlass_status(cublasStatus_t cublas);
57
+
58
+ /// Converts a cuBLAS status to cutlass::profiler::Disposition
59
+ Disposition get_cutlass_disposition(cublasStatus_t cublas_status);
60
+
61
+ /// Maps a CUTLASS tensor layout to a cuBLAS transpose operation
62
+ bool get_cublas_transpose_operation(
63
+ cublasOperation_t &operation,
64
+ library::LayoutTypeID layout,
65
+ library::ComplexTransform transform = library::ComplexTransform::kNone);
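For orientation, one plausible mapping implied by this declaration; the real implementation lives in the profiler's .cu sources and may handle more cases, so treat the table below as an assumption rather than the library's documented behavior:

    // kColumnMajor + kNone      -> CUBLAS_OP_N
    // kRowMajor    + kNone      -> CUBLAS_OP_T   (row-major data read as a transposed column-major matrix)
    // kRowMajor    + kConjugate -> CUBLAS_OP_C   (conjugate transpose)
    // other combinations        -> return false  (no single cublasOperation_t expresses them)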
66
+
67
+ /// Maps a CUTLASS numeric type to a cuBLAS data type enumeration
68
+ bool get_cublas_datatype(cublasDataType_t &data_type, library::NumericTypeID element_type);
69
+
70
+ /// Gets the cublas algorithm given threadblock tile dimensions and math opcode class
71
+ cublasGemmAlgo_t get_cublas_gemm_algo(
72
+ int cta_m,
73
+ int cta_n,
74
+ int cta_k,
75
+ library::OpcodeClassID opcode_class);
76
+
77
+ /// Returns a status if cuBLAS can satisfy a particular GEMM description
78
+ Status cublas_satisfies(library::GemmDescription const &desc);
79
+
80
+ /// Returns a status if cuBLAS can satisfy a particular RankK description
81
+ Status cublas_satisfies(library::RankKDescription const &desc);
82
+
83
+ /// Returns a status if cuBLAS can satisfy a particular TRMM description
84
+ Status cublas_satisfies(library::TrmmDescription const &desc);
85
+
86
+ /// Returns a status if cuBLAS can satisfy a particular SYMM/HEMM description
87
+ Status cublas_satisfies(library::SymmDescription const &desc);
88
+
89
+ /// This is a helper class to create cublasHandle_t automatically on CublasCreate object creation and
90
+ /// to destroy cublasHandle_t on CublasCreate object destruction.
91
+ /// Additionally, it provides an implicit cast from a CublasCreate object to cublasHandle_t
92
+ class CublasCreate {
93
+ private:
94
+ cublasHandle_t handle;
95
+ cublasStatus_t status;
96
+
97
+ public:
98
+ CublasCreate() {
99
+ status = cublasCreate(&handle);
100
+ }
101
+
102
+ ~CublasCreate() {
103
+ cublasDestroy(handle);
104
+ }
105
+
106
+ /// Implicit cast CublasCreate object to cublasHandle_t
107
+ operator cublasHandle_t() const { return handle; }
108
+
109
+ /// returns cublasStatus_t for handle creation
110
+ cublasStatus_t get_cublas_create_status() { return status; }
111
+ };
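A hypothetical usage sketch (not part of this header), assuming <cublas_v2.h> and this header are both included; it illustrates the RAII behavior and the implicit conversion described above:

    cutlass::profiler::CublasCreate handle;                  // cublasCreate() runs here
    if (handle.get_cublas_create_status() == CUBLAS_STATUS_SUCCESS) {
      cublasSetStream(handle, /* stream = */ nullptr);       // implicit cast to cublasHandle_t
    }
    // cublasDestroy() runs automatically when 'handle' goes out of scope.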
112
+
113
+ /// This is a helper class to create cublasLtHandle_t automatically on CublasLtCreate object creation and
114
+ /// to destroy cublasLtHandle_t on CublasLtCreate object destruction.
115
+ /// Additionally, it provides an implicit cast from a CublasLtCreate object to cublasLtHandle_t
116
+ class CublasLtCreate {
117
+ private:
118
+ cublasLtHandle_t handle;
119
+ cublasStatus_t status;
120
+
121
+ public:
122
+ CublasLtCreate() {
123
+ status = cublasLtCreate(&handle);
124
+ }
125
+
126
+ ~CublasLtCreate() {
127
+ cublasLtDestroy(handle);
128
+ }
129
+
130
+ /// Implicit cast CublasLtCreate object to cublasLtHandle_t
131
+ operator cublasLtHandle_t() const { return handle; }
132
+
133
+ /// returns cublasStatus_t for handle creation
134
+ cublasStatus_t get_cublaslt_create_status() { return status; }
135
+ };
136
+ /////////////////////////////////////////////////////////////////////////////////////////////////
137
+
138
+ namespace detail {
139
+
140
+ /// Selects one or more cuBLAS algorithms.
141
+ static void select_cublas_algorithms(
142
+ std::vector<cublasGemmAlgo_t> &algorithms,
143
+ Options const &options,
144
+ library::GemmDescription const &op_desc) {
145
+
146
+ library::OpcodeClassID const & opcode_class =
147
+ op_desc.tile_description.math_instruction.opcode_class;
148
+
149
+ switch (options.library.algorithm_mode) {
150
+ case AlgorithmMode::kMatching:
151
+ {
152
+ algorithms.push_back(get_cublas_gemm_algo(
153
+ op_desc.tile_description.threadblock_shape.m(),
154
+ op_desc.tile_description.threadblock_shape.n(),
155
+ op_desc.tile_description.threadblock_shape.k(),
156
+ opcode_class));
157
+ break;
158
+ }
159
+
160
+ case AlgorithmMode::kBest:
161
+ {
162
+ // Choose first enumerated mode. If none are enumerated, choose based on opcode class
163
+ // and evaluate all of them.
164
+
165
+ if (options.library.algorithms.empty()) {
166
+ // Enumerate all algorithms
167
+ if (opcode_class == library::OpcodeClassID::kSimt) {
168
+
169
+ for (int algo = CUBLAS_GEMM_DEFAULT;
170
+ algo <= CUBLAS_GEMM_ALGO23;
171
+ ++algo) {
172
+
173
+ algorithms.push_back(cublasGemmAlgo_t(algo));
174
+ }
175
+ }
176
+ else {
177
+
178
+ for (int algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP;
179
+ algo <= CUBLAS_GEMM_ALGO15_TENSOR_OP;
180
+ ++algo) {
181
+
182
+ algorithms.push_back(cublasGemmAlgo_t(algo));
183
+ }
184
+ }
185
+ }
186
+ else {
187
+ // Use the listed algorithms
188
+ algorithms.reserve(options.library.algorithms.size());
189
+
190
+ for (int algo : options.library.algorithms) {
191
+ algorithms.push_back(reinterpret_cast<cublasGemmAlgo_t const &>(algo));
192
+ }
193
+ }
194
+
195
+ break;
196
+ }
197
+
198
+ case AlgorithmMode::kDefault:
199
+ {
200
+
201
+ // Use the library's default algorithm
202
+ algorithms.push_back((opcode_class == library::OpcodeClassID::kSimt ?
203
+ CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP));
204
+
205
+ break;
206
+ }
207
+ default:
208
+ {
209
+ break;
210
+ }
211
+ }
212
+ }
213
+
214
+ /// Dispatcher to cublasGemmEx()
215
+ struct cublasGemmExDispatcher {
216
+
217
+ //
218
+ // Data members
219
+ //
220
+ library::GemmUniversalConfiguration configuration;
221
+ library::GemmUniversalArguments arguments;
222
+
223
+ // cublas-specific data structures to fill cublas API call arguments
224
+ cublasOperation_t trans_A;
225
+ cublasOperation_t trans_B;
226
+ cudaDataType_t data_type_A;
227
+ cudaDataType_t data_type_B;
228
+ cudaDataType_t data_type_C;
229
+ cudaDataType_t compute_data_type;
230
+
231
+ #if (__CUDACC_VER_MAJOR__ >= 11)
232
+ cublasComputeType_t compute_type;
233
+ #endif
234
+
235
+ cublasGemmAlgo_t algo;
236
+ Status status;
237
+
238
+ //
239
+ // Methods
240
+ //
241
+
242
+ cublasGemmExDispatcher(
243
+ library::GemmDescription const &op_desc,
244
+ library::GemmUniversalConfiguration configuration_,
245
+ library::GemmUniversalArguments arguments_,
246
+ cublasGemmAlgo_t algorithm = CUBLAS_GEMM_DFALT
247
+ );
248
+
249
+ /// Executes GEMM using these arguments
250
+ cublasStatus_t operator()(cublasHandle_t handle);
251
+ };
252
+
253
+ /// Dispatcher to cublaslt kernels
254
+ //
255
+ struct cublasLtGemmExDispatcher {
256
+
257
+ //
258
+ // Data members
259
+ //
260
+ library::GemmDescription const &op_desc;
261
+ library::GemmUniversalConfiguration configuration;
262
+ library::GemmUniversalArguments arguments;
263
+
264
+ // cublas-specific data structures to fill cublas API call arguments
265
+ cublasOperation_t trans_A;
266
+ cublasOperation_t trans_B;
267
+ cudaDataType_t data_type_A;
268
+ cudaDataType_t data_type_B;
269
+ cudaDataType_t data_type_C;
270
+ cudaDataType_t compute_data_type = CUDA_R_32F;
271
+
272
+ //cublasLt-specific data structures
273
+ cublasLtMatmulDesc_t operationDesc = NULL;
274
+ cublasLtMatrixLayout_t Adesc = NULL, Bdesc = NULL, Cdesc = NULL, Ddesc = NULL;
275
+ cublasLtMatmulPreference_t preference = NULL;
276
+
277
+ // Set by the call to get_cublaslt_algo()
278
+ cublasLtMatmulHeuristicResult_t heuristicResult_;
279
+ void *workspace = nullptr;
280
+
281
+ Status status;
282
+
283
+ #if (__CUDACC_VER_MAJOR__ >= 11)
284
+ cublasComputeType_t compute_type;
285
+ #endif
286
+
287
+ //
288
+ // Methods
289
+ //
290
+
291
+ cublasLtGemmExDispatcher(
292
+ library::GemmDescription const &op_desc,
293
+ library::GemmUniversalConfiguration configuration_,
294
+ library::GemmUniversalArguments arguments_
295
+ );
296
+
297
+ /// Initialize the cublasLt variables
298
+ void initialize_cublaslt();
299
+
300
+
301
+ /// Runs auto-tuning for the cublas heuristics
302
+ bool get_cublaslt_algo(cublasLtHandle_t handle,
303
+ AlgorithmMode algorithm_mode
304
+ );
305
+
306
+ /// Executes GEMM using these arguments
307
+ cublasStatus_t operator()(cublasLtHandle_t handle);
308
+
309
+ ~cublasLtGemmExDispatcher(){
310
+
311
+ // descriptors are no longer needed as all GPU work was already enqueued
312
+ if (preference) cublasLtMatmulPreferenceDestroy(preference);
313
+ if (Ddesc) cublasLtMatrixLayoutDestroy(Ddesc);
314
+ if (Cdesc) cublasLtMatrixLayoutDestroy(Cdesc);
315
+ if (Bdesc) cublasLtMatrixLayoutDestroy(Bdesc);
316
+ if (Adesc) cublasLtMatrixLayoutDestroy(Adesc);
317
+ if (operationDesc) cublasLtMatmulDescDestroy(operationDesc);
318
+
319
+ if (workspace) {
320
+ cudaFree(workspace);
321
+ }
322
+
323
+ }
324
+
325
+ };
326
+
327
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
328
+
329
+ /// Dispatcher to cublas rank k update kernels
330
+ struct cublasRankKDispatcher {
331
+
332
+ //
333
+ // Data members
334
+ //
335
+ library::RankKConfiguration configuration;
336
+ library::RankKArguments arguments;
337
+
338
+ // cublas-specific data structures to fill cublas API call arguments
339
+ cublasOperation_t trans_A;
340
+ cublasFillMode_t uplo;
341
+ cudaDataType_t data_type_A;
342
+ cudaDataType_t data_type_C;
343
+ cudaDataType_t compute_data_type;
344
+
345
+ #if (__CUDACC_VER_MAJOR__ >= 11)
346
+ cublasComputeType_t compute_type;
347
+ #endif
348
+
349
+ int num_ranks; //(rank-k or rank-2k)
350
+ BlasMode blas_mode; //(symmetric or hermitian)
351
+ Status status;
352
+
353
+ //
354
+ // Methods
355
+ //
356
+
357
+ cublasRankKDispatcher(
358
+ library::RankKDescription const &op_desc,
359
+ library::RankKConfiguration configuration_,
360
+ library::RankKArguments arguments_
361
+ );
362
+
363
+ /// Executes RankK using these arguments
364
+ cublasStatus_t operator()(cublasHandle_t handle);
365
+ };
366
+
367
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
368
+
369
+ /// Dispatcher to cublasTrmm()
370
+ struct cublasTrmmDispatcher {
371
+
372
+ //
373
+ // Data members
374
+ //
375
+ library::TrmmConfiguration configuration;
376
+ library::TrmmArguments arguments;
377
+
378
+ // cublas-specific data structures to fill cublas API call arguments
379
+ cublasOperation_t trans_A;
380
+ cublasSideMode_t side;
381
+ cublasFillMode_t uplo;
382
+ cublasDiagType_t diag;
383
+ cudaDataType_t data_type_A;
384
+ cudaDataType_t data_type_B;
385
+ cudaDataType_t data_type_D;
386
+ cudaDataType_t compute_data_type;
387
+
388
+ #if (__CUDACC_VER_MAJOR__ >= 11)
389
+ cublasComputeType_t compute_type;
390
+ #endif
391
+
392
+ Status status;
393
+
394
+ //
395
+ // Methods
396
+ //
397
+
398
+ cublasTrmmDispatcher(
399
+ library::TrmmDescription const &op_desc,
400
+ library::TrmmConfiguration configuration_,
401
+ library::TrmmArguments arguments_
402
+ );
403
+
404
+ /// Executes TRMM using these arguments
405
+ cublasStatus_t operator()(cublasHandle_t handle);
406
+ };
407
+
408
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
409
+
410
+ /// Dispatcher to cublas symm/hemm update kernels
411
+ struct cublasSymmDispatcher {
412
+
413
+ //
414
+ // Data members
415
+ //
416
+ library::SymmConfiguration configuration;
417
+ library::SymmArguments arguments;
418
+
419
+ // cublas-specific data structures to fill cublas API call arguments
420
+ cublasSideMode_t side;
421
+ cublasFillMode_t uplo;
422
+ cudaDataType_t data_type_A;
423
+ cudaDataType_t data_type_B;
424
+ cudaDataType_t data_type_C;
425
+ cudaDataType_t compute_data_type;
426
+
427
+ #if (__CUDACC_VER_MAJOR__ >= 11)
428
+ cublasComputeType_t compute_type;
429
+ #endif
430
+
431
+ BlasMode blas_mode; //(symmetric or hermitian)
432
+ Status status;
433
+
434
+ //
435
+ // Methods
436
+ //
437
+
438
+ cublasSymmDispatcher(
439
+ library::SymmDescription const &op_desc,
440
+ library::SymmConfiguration configuration_,
441
+ library::SymmArguments arguments_
442
+ );
443
+
444
+ /// Executes Symm using these arguments
445
+ cublasStatus_t operator()(cublasHandle_t handle);
446
+ };
447
+
448
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
449
+
450
+ } // namespace detail
451
+
452
+ } // namespace profiler
453
+ } // namespace cutlass
454
+
455
+
456
+ #endif // #if CUTLASS_ENABLE_CUBLAS
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cudnn_helpers.h ADDED
@@ -0,0 +1,590 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Helper functions for mapping CUTLASS concepts to cuDNN.
33
+
34
+ */
35
+
36
+ #pragma once
37
+ #if CUTLASS_ENABLE_CUDNN
38
+ #include <cuda_runtime.h>
39
+ #include <cudnn.h>
40
+ #include <iostream>
41
+ #include "cutlass/cutlass.h"
42
+ #include "cutlass/util/device_memory.h"
43
+ #include "cutlass/library/library.h"
44
+ #include "enumerated_types.h"
45
+
46
+ /////////////////////////////////////////////////////////////////////////////////////////////////
47
+
48
+ namespace cutlass {
49
+ namespace profiler {
50
+
51
+ /////////////////////////////////////////////////////////////////////////////////////////////////
52
+ /// Converts a cuDNN status to cutlass::Status
53
+ Status get_cutlass_status(cudnnStatus_t cudnn_status);
54
+
55
+ /// Converts a cuDNN status to cutlass::profiler::Disposition
56
+ Disposition get_cutlass_disposition(cudnnStatus_t cudnn_status);
57
+
58
+ /// Converts a cudnnStatus_t to cutlass::Status and returns it when Status::kSuccess; otherwise throws an exception
59
+ Status checkCudnnErr(cudnnStatus_t cudnn_status);
60
+
61
+ /// Maps a CUTLASS conv mode to a cuDNN conv mode enumeration
62
+ bool get_cudnn_conv_mode(cudnnConvolutionMode_t &cudnn_conv_mode, conv::Mode conv_mode);
63
+
64
+ /// Maps a CUTLASS layout type to a cuDNN data type enumeration
65
+ bool get_cudnn_layout(cudnnTensorFormat_t &cudnn_layout, library::LayoutTypeID layout);
66
+
67
+ /// Maps a CUTLASS numeric type to a cuDNN data type enumeration
68
+ bool get_cudnn_datatype(cudnnDataType_t &cudnn_element_type, library::NumericTypeID element_type);
69
+
70
+ /// Maps CUTLASS math OpcodeClassID and MathOperationID to cuDNN math_type
71
+ bool get_cudnn_mathtype(cudnnMathType_t &cudnn_math_type, library::ConvDescription const &conv_desc);
72
+
73
+ /// Returns a status if cudnn can satisfy a particular Conv2d description
74
+ Status cudnn_satisfies(library::ConvDescription const &desc, library::Conv2dConfiguration const &configuration);
75
+
76
+ /// Returns a status if cudnn can satisfy a particular Conv3d description
77
+ Status cudnn_satisfies(library::ConvDescription const &desc, library::Conv3dConfiguration const &configuration);
78
+
79
+ /// cuDNN compute type appears to be hardcoded to float (to work around a possible cuDNN issue)
80
+ float cast_cudnn_compute_type_to_float(library::NumericTypeID type, void const * src);
81
+
82
+
83
+ /// This is a helper class to create cudnnHandle_t automatically on CudnnCreate object creation and
84
+ /// to destroy cudnnHandle_t on CudnnCreate object destruction.
85
+ /// Additionally, it provides implicit cast from CudnnCreate's object to cudnnHandle_t's object
86
+ class CudnnCreate {
87
+ private:
88
+ cudnnHandle_t handle;
89
+ cudnnStatus_t status;
90
+
91
+ public:
92
+ CudnnCreate() {
93
+ status = cudnnCreate(&handle);
94
+ }
95
+
96
+ ~CudnnCreate() {
97
+ cudnnDestroy(handle);
98
+ }
99
+
100
+ /// Implicit cast CudnnCreate object to cudnnHandle_t
101
+ operator cudnnHandle_t() const { return handle; }
102
+
103
+ /// returns cudnnStatus_t for handle creation
104
+ cudnnStatus_t get_cudnn_create_status() { return status; }
105
+ };
106
+
107
+
108
+ namespace detail {
109
+
110
+ /// Dispatcher to cudnn convolution operators
111
+ struct cudnnConvDispatcher {
112
+
113
+ //
114
+ // Data members
115
+ //
116
+ //library::Conv2dConfiguration configuration;
117
+ library::ConvArguments arguments;
118
+ library::ConvKind conv_kind;
119
+
120
+ // cudnn-specific data structures to fill cudnn API call arguments
121
+ // cudnn activation, filter, and output descriptors
122
+ cudnnTensorDescriptor_t activation_desc;
123
+ cudnnFilterDescriptor_t filter_desc;
124
+ cudnnTensorDescriptor_t output_desc;
125
+ cudnnConvolutionDescriptor_t conv_desc;
126
+
127
+ // cudnn datatypes
128
+ cudnnDataType_t data_type_activation;
129
+ cudnnDataType_t data_type_filter;
130
+ cudnnDataType_t data_type_output;
131
+
132
+ // cudnn layouts
133
+ cudnnTensorFormat_t layout_activation;
134
+ cudnnTensorFormat_t layout_filter;
135
+ cudnnTensorFormat_t layout_output;
136
+
137
+ // cudnn convolution mode
138
+ cudnnConvolutionMode_t conv_mode;
139
+
140
+ // cudnn math type (tensorop, tensorop with conversion, simt)
141
+ cudnnMathType_t math_type;
142
+
143
+ // cudnn compute data type
144
+ cudnnDataType_t compute_type;
145
+
146
+ // cudnn compute type seems to be hardcoded to float (to work around a possible cuDNN issue)
147
+ float alpha;
148
+ float beta;
149
+
150
+ // cudnn workspace
151
+ size_t workspace_size_in_bytes = 0;
152
+ cutlass::device_memory::allocation<char> workspace;
153
+
154
+ // select cudnn's implicit gemm precomputed algorithm with tensor operations
155
+ static cudnnConvolutionFwdAlgo_t const fprop_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
156
+ static cudnnConvolutionBwdDataAlgo_t const dgrad_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
157
+ static cudnnConvolutionBwdFilterAlgo_t const wgrad_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
158
+
159
+ Status status;
160
+
161
+ //
162
+ // Methods
163
+ //
164
+
165
+ // TODO: unify ctor cudnnConvDispatcher for conv2d and conv3d by unifying Conv2dConfiguration
166
+
167
+ // ctor for conv2d
168
+ cudnnConvDispatcher(
169
+ library::ConvDescription const &op_desc,
170
+ library::Conv2dConfiguration configuration,
171
+ library::ConvArguments arguments_,
172
+ cudnnHandle_t handle
173
+ ):
174
+ //configuration(configuration_),
175
+ arguments(arguments_),
176
+ conv_kind(op_desc.conv_kind),
177
+ status(Status::kSuccess) {
178
+
179
+ bool good = true;
180
+
181
+ // Get cudnn datatype, layout, and convolution mode from library::ConvDescription
182
+ good = (good && get_cudnn_datatype(data_type_activation, op_desc.A.element));
183
+ good = (good && get_cudnn_datatype(data_type_filter, op_desc.B.element));
184
+ good = (good && get_cudnn_datatype(data_type_output, op_desc.C.element));
185
+ good = (good && get_cudnn_layout(layout_activation, op_desc.A.layout));
186
+ good = (good && get_cudnn_layout(layout_filter, op_desc.B.layout));
187
+ good = (good && get_cudnn_layout(layout_output, op_desc.C.layout));
188
+ good = (good && get_cudnn_conv_mode(conv_mode, configuration.problem_size.mode));
189
+ // Get cudnn mathtype (cudnnMathType_t)
190
+ good = (good && get_cudnn_mathtype(math_type, op_desc));
191
+ good = (good && get_cudnn_datatype(
192
+ compute_type,
193
+ op_desc.tile_description.math_instruction.element_accumulator));
194
+ // Check that the cutlass Conv2d description has an equivalent operator in cudnn
195
+ if (!good) {
196
+ status = Status::kErrorNotSupported;
197
+ return;
198
+ }
199
+ // cudnn compute type seems to be hardcoded to float (to work around a possible cuDNN issue)
200
+ alpha = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.alpha);
201
+ beta = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.beta);
202
+
203
+ // Create convolution descriptor object
204
+ status = get_cutlass_status(cudnnCreateConvolutionDescriptor(&conv_desc));
205
+
206
+ // Configure convolution operator
207
+ std::vector<int> padding {configuration.problem_size.pad_h, configuration.problem_size.pad_w};
208
+ std::vector<int> stride {configuration.problem_size.stride_h, configuration.problem_size.stride_w};
209
+ std::vector<int> dilation {configuration.problem_size.dilation_h, configuration.problem_size.dilation_w};
210
+
211
+ status = get_cutlass_status(
212
+ cudnnSetConvolutionNdDescriptor(
213
+ conv_desc,
214
+ op_desc.conv_dim,
215
+ padding.data(),
216
+ stride.data(),
217
+ dilation.data(),
218
+ conv_mode,
219
+ compute_type
220
+ ));
221
+
222
+ // Set groups
223
+ status = get_cutlass_status(cudnnSetConvolutionGroupCount(conv_desc, configuration.problem_size.groups));
224
+
225
+ // Create activation, filter, and output descriptor objects
226
+ status = get_cutlass_status(cudnnCreateTensorDescriptor(&activation_desc));
227
+ status = get_cutlass_status(cudnnCreateFilterDescriptor(&filter_desc));
228
+ status = get_cutlass_status(cudnnCreateTensorDescriptor(&output_desc));
229
+
230
+ // Set activation, filter, and output descriptor
231
+ status = get_cutlass_status(
232
+ cudnnSetTensor4dDescriptor(
233
+ activation_desc,
234
+ layout_activation,
235
+ data_type_activation,
236
+ configuration.problem_size.N,
237
+ configuration.problem_size.C,
238
+ configuration.problem_size.H,
239
+ configuration.problem_size.W
240
+ ));
241
+
242
+ status = get_cutlass_status(
243
+ cudnnSetFilter4dDescriptor(
244
+ filter_desc,
245
+ data_type_filter,
246
+ layout_filter,
247
+ configuration.problem_size.K,
248
+ configuration.problem_size.C / configuration.problem_size.groups,
249
+ configuration.problem_size.R,
250
+ configuration.problem_size.S
251
+ ));
252
+
253
+ status = get_cutlass_status(
254
+ cudnnSetTensor4dDescriptor(
255
+ output_desc,
256
+ layout_output,
257
+ data_type_output,
258
+ configuration.problem_size.N,
259
+ configuration.problem_size.K,
260
+ configuration.problem_size.P,
261
+ configuration.problem_size.Q
262
+ ));
263
+
264
+ // Set math instruction to tensor op
265
+ status = get_cutlass_status(
266
+ cudnnSetConvolutionMathType(conv_desc, math_type));
267
+
268
+ // Initialize workspace
269
+ switch (conv_kind) {
270
+ case library::ConvKind::kFprop:
271
+ status = get_cutlass_status(
272
+ cudnnGetConvolutionForwardWorkspaceSize(
273
+ handle,
274
+ activation_desc,
275
+ filter_desc,
276
+ conv_desc,
277
+ output_desc,
278
+ fprop_algo,
279
+ &workspace_size_in_bytes
280
+ )); break;
281
+ case library::ConvKind::kDgrad:
282
+ status = get_cutlass_status(
283
+ cudnnGetConvolutionBackwardDataWorkspaceSize(
284
+ handle,
285
+ filter_desc,
286
+ output_desc,
287
+ conv_desc,
288
+ activation_desc,
289
+ dgrad_algo,
290
+ &workspace_size_in_bytes
291
+ )); break;
292
+ case library::ConvKind::kWgrad:
293
+ status = get_cutlass_status(
294
+ cudnnGetConvolutionBackwardFilterWorkspaceSize(
295
+ handle,
296
+ activation_desc,
297
+ output_desc,
298
+ conv_desc,
299
+ filter_desc,
300
+ wgrad_algo,
301
+ &workspace_size_in_bytes
302
+ )); break;
303
+
304
+ }
305
+
306
+ workspace = cutlass::device_memory::allocation<char>(workspace_size_in_bytes);
307
+ }
308
+
309
+
310
+ // ctor for conv3d
311
+ cudnnConvDispatcher(
312
+ library::ConvDescription const &op_desc,
313
+ library::Conv3dConfiguration configuration,
314
+ library::ConvArguments arguments_,
315
+ cudnnHandle_t handle
316
+ ):
317
+ //configuration(configuration_),
318
+ arguments(arguments_),
319
+ conv_kind(op_desc.conv_kind),
320
+ status(Status::kSuccess) {
321
+
322
+ bool good = true;
323
+
324
+ // Get cudnn datatype, layout, and convolution mode from library::ConvDescription
325
+ good = (good && get_cudnn_datatype(data_type_activation, op_desc.A.element));
326
+ good = (good && get_cudnn_datatype(data_type_filter, op_desc.B.element));
327
+ good = (good && get_cudnn_datatype(data_type_output, op_desc.C.element));
328
+
329
+ good = (good && get_cudnn_layout(layout_activation, op_desc.A.layout));
330
+ good = (good && get_cudnn_layout(layout_filter, op_desc.B.layout));
331
+ good = (good && get_cudnn_layout(layout_output, op_desc.C.layout));
332
+
333
+ good = (good && get_cudnn_conv_mode(conv_mode, configuration.problem_size.mode));
334
+
335
+ // cudnn compute type seems to be hardcoded to float (to work around a possible cuDNN issue)
336
+ alpha = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.alpha);
337
+ beta = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.beta);
338
+
339
+ good = (good && get_cudnn_datatype(
340
+ compute_type,
341
+ op_desc.tile_description.math_instruction.element_accumulator));
342
+
343
+ // Check that the cutlass Conv3d description has an equivalent operator in cudnn
344
+ if (!good) {
+ status = Status::kErrorNotSupported;
+ return;
+ }
347
+
348
+ // Create convolution descriptor object
349
+ status = get_cutlass_status(cudnnCreateConvolutionDescriptor(&conv_desc));
350
+
351
+ // Configure convolution operator
352
+ std::vector<int> padding {configuration.problem_size.pad_d, configuration.problem_size.pad_h, configuration.problem_size.pad_w};
353
+ std::vector<int> stride {configuration.problem_size.stride_d, configuration.problem_size.stride_h, configuration.problem_size.stride_w};
354
+ std::vector<int> dilation {configuration.problem_size.dilation_d, configuration.problem_size.dilation_h, configuration.problem_size.dilation_w};
355
+
356
+ status = get_cutlass_status(
357
+ cudnnSetConvolutionNdDescriptor(
358
+ conv_desc,
359
+ op_desc.conv_dim,
360
+ padding.data(),
361
+ stride.data(),
362
+ dilation.data(),
363
+ conv_mode,
364
+ compute_type
365
+ ));
366
+
367
+ // Set groups
368
+ status = get_cutlass_status(cudnnSetConvolutionGroupCount(conv_desc, configuration.problem_size.groups));
369
+
370
+ // Create activation, filter, and output descriptor objects
371
+ status = get_cutlass_status(cudnnCreateTensorDescriptor(&activation_desc));
372
+ status = get_cutlass_status(cudnnCreateFilterDescriptor(&filter_desc));
373
+ status = get_cutlass_status(cudnnCreateTensorDescriptor(&output_desc));
374
+
375
+ // Set activation descriptor
376
+ std::vector<int> activation_extent {
377
+ configuration.problem_size.N,
378
+ configuration.problem_size.C,
379
+ configuration.problem_size.D,
380
+ configuration.problem_size.H,
381
+ configuration.problem_size.W
382
+ };
383
+
384
+ std::vector<int> activation_stride {
385
+ configuration.layout_activations.stride()[3],
386
+ 1,
387
+ configuration.layout_activations.stride()[2],
388
+ configuration.layout_activations.stride()[1],
389
+ configuration.layout_activations.stride()[0]
390
+ };
391
+
392
+ status = get_cutlass_status(
393
+ cudnnSetTensorNdDescriptor(
394
+ activation_desc,
395
+ data_type_activation,
396
+ op_desc.conv_dim + 2,
397
+ activation_extent.data(),
398
+ activation_stride.data()
399
+ ));
400
+
401
+ // Set filter descriptor
402
+ std::vector<int> filter_extent {
403
+ configuration.problem_size.K,
404
+ configuration.problem_size.C,
405
+ configuration.problem_size.T,
406
+ configuration.problem_size.R,
407
+ configuration.problem_size.S
408
+ };
409
+
410
+ std::vector<int> filter_stride {
411
+ configuration.layout_filters.stride()[3],
412
+ 1,
413
+ configuration.layout_filters.stride()[2],
414
+ configuration.layout_filters.stride()[1],
415
+ configuration.layout_filters.stride()[0]
416
+ };
417
+
418
+ status = get_cutlass_status(
419
+ cudnnSetFilterNdDescriptor(
420
+ filter_desc,
421
+ data_type_filter,
422
+ layout_filter,
423
+ op_desc.conv_dim + 2,
424
+ filter_extent.data()
425
+ ));
426
+
427
+
428
+ // Set output descriptor
429
+ std::vector<int> output_extent {
430
+ configuration.problem_size.N,
431
+ configuration.problem_size.K,
432
+ configuration.problem_size.Z,
433
+ configuration.problem_size.P,
434
+ configuration.problem_size.Q
435
+ };
436
+
437
+ std::vector<int> output_stride {
438
+ configuration.layout_output.stride()[3],
439
+ 1,
440
+ configuration.layout_output.stride()[2],
441
+ configuration.layout_output.stride()[1],
442
+ configuration.layout_output.stride()[0]
443
+ };
444
+
445
+ status = get_cutlass_status(
446
+ cudnnSetTensorNdDescriptor(
447
+ output_desc,
448
+ data_type_output,
449
+ op_desc.conv_dim + 2,
450
+ output_extent.data(),
451
+ output_stride.data()
452
+ ));
453
+
454
+ // Set math instruction to tensor op
455
+ status = get_cutlass_status(
456
+ cudnnSetConvolutionMathType(conv_desc, math_type));
457
+
458
+ // Initialize workspace
459
+ switch (conv_kind) {
460
+ case library::ConvKind::kFprop:
461
+ status = get_cutlass_status(
462
+ cudnnGetConvolutionForwardWorkspaceSize(
463
+ handle,
464
+ activation_desc,
465
+ filter_desc,
466
+ conv_desc,
467
+ output_desc,
468
+ fprop_algo,
469
+ &workspace_size_in_bytes
470
+ )); break;
471
+ case library::ConvKind::kDgrad:
472
+ status = get_cutlass_status(
473
+ cudnnGetConvolutionBackwardDataWorkspaceSize(
474
+ handle,
475
+ filter_desc,
476
+ output_desc,
477
+ conv_desc,
478
+ activation_desc,
479
+ dgrad_algo,
480
+ &workspace_size_in_bytes
481
+ )); break;
482
+ case library::ConvKind::kWgrad:
483
+ status = get_cutlass_status(
484
+ cudnnGetConvolutionBackwardFilterWorkspaceSize(
485
+ handle,
486
+ activation_desc,
487
+ output_desc,
488
+ conv_desc,
489
+ filter_desc,
490
+ wgrad_algo,
491
+ &workspace_size_in_bytes
492
+ )); break;
493
+
494
+ }
495
+
496
+ workspace = cutlass::device_memory::allocation<char>(workspace_size_in_bytes);
497
+ }
498
+
499
+ /// Executes the convolution operator (conv2d or conv3d) from the cudnn library
500
+ cudnnStatus_t operator()(cudnnHandle_t handle) {
501
+
502
+ switch (conv_kind) {
503
+ case library::ConvKind::kFprop:
504
+ return cudnnConvolutionForward(
505
+ handle,
506
+ &alpha,
507
+ activation_desc,
508
+ activation(),
509
+ filter_desc,
510
+ filter(),
511
+ conv_desc,
512
+ fprop_algo,
513
+ workspace.get(),
514
+ workspace_size_in_bytes,
515
+ &beta,
516
+ output_desc,
517
+ arguments.D
518
+ );
519
+ case library::ConvKind::kDgrad:
520
+ return cudnnConvolutionBackwardData(
521
+ handle,
522
+ &alpha,
523
+ filter_desc,
524
+ filter(),
525
+ output_desc,
526
+ output(),
527
+ conv_desc,
528
+ dgrad_algo,
529
+ workspace.get(),
530
+ workspace_size_in_bytes,
531
+ &beta,
532
+ activation_desc,
533
+ arguments.D
534
+ );
535
+ case library::ConvKind::kWgrad:
536
+ return cudnnConvolutionBackwardFilter(
537
+ handle,
538
+ &alpha,
539
+ activation_desc,
540
+ activation(),
541
+ output_desc,
542
+ output(),
543
+ conv_desc,
544
+ wgrad_algo,
545
+ workspace.get(),
546
+ workspace_size_in_bytes,
547
+ &beta,
548
+ filter_desc,
549
+ arguments.D
550
+ );
551
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
552
+ }
553
+ }
554
+
555
+ // Returns Activation Tensor
556
+ void const * activation() const {
557
+ switch(conv_kind) {
558
+ case library::ConvKind::kFprop : return arguments.A;
559
+ case library::ConvKind::kDgrad : return arguments.C;
560
+ case library::ConvKind::kWgrad : return arguments.B;
561
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
562
+ }
563
+ }
564
+
565
+ // Returns Filter Tensor
566
+ void const *filter() const {
567
+ switch(conv_kind) {
568
+ case library::ConvKind::kFprop : return arguments.B;
569
+ case library::ConvKind::kDgrad : return arguments.B;
570
+ case library::ConvKind::kWgrad : return arguments.C;
571
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
572
+ }
573
+ }
574
+
575
+ // Returns Output Tensor
576
+ void const *output() const {
577
+ switch(conv_kind) {
578
+ case library::ConvKind::kFprop : return arguments.C;
579
+ case library::ConvKind::kDgrad : return arguments.A;
580
+ case library::ConvKind::kWgrad : return arguments.A;
581
+ default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)");
582
+ }
583
+ }
584
+ };
585
+
586
+ } // namespace detail
587
+ /////////////////////////////////////////////////////////////////////////////////////////////////
588
+ #endif //#if CUTLASS_ENABLE_CUDNN
589
+ } // namespace profiler
590
+ } // namespace cutlass
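Taken together, CudnnCreate owns the cudnnHandle_t for the lifetime of a scope, and cudnnConvDispatcher builds the descriptors in its constructor and launches the selected algorithm in operator(). A sketch under the assumption that conv_desc, conv2d_config, and conv_args (placeholder names) are already filled in from the CUTLASS library:

    cutlass::profiler::CudnnCreate handle;                 // cudnnCreate() in ctor, cudnnDestroy() in dtor
    if (handle.get_cudnn_create_status() == CUDNN_STATUS_SUCCESS) {
      cutlass::profiler::detail::cudnnConvDispatcher conv(conv_desc, conv2d_config, conv_args, handle);
      if (conv.status == cutlass::Status::kSuccess) {
        cudnnStatus_t result = conv(handle);               // runs fprop, dgrad, or wgrad
        (void)result;
      }
    }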
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/cutlass_profiler.h ADDED
@@ -0,0 +1,93 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+ */
34
+
35
+ #pragma once
36
+ // CUTLASS Library includes
37
+ #include "cutlass/library/library.h"
38
+ #include "cutlass/library/manifest.h"
39
+ #include "cutlass/library/singleton.h"
40
+
41
+ #include "options.h"
42
+ #include "operation_profiler.h"
43
+
44
+ /////////////////////////////////////////////////////////////////////////////////////////////////
45
+
46
+ namespace cutlass {
47
+ namespace profiler {
48
+
49
+ /////////////////////////////////////////////////////////////////////////////////////////////////
50
+
51
+ /// CUTLASS Profiler application
52
+ class CutlassProfiler {
53
+ private:
54
+
55
+ //
56
+ // Data members
57
+ //
58
+
59
+ /// Performance testbench options
60
+ Options options_;
61
+
62
+ /// Entry points for each operation
63
+ OperationProfilerVector operation_profilers_;
64
+
65
+ private:
66
+
67
+ /// Prints usage
68
+ void print_usage_(std::ostream &);
69
+
70
+ /// Prints options
71
+ void print_options_(std::ostream &);
72
+
73
+ /// Enumerates all operations
74
+ void enumerate_();
75
+
76
+ /// Profiles all operations
77
+ int profile_();
78
+
79
+ public:
80
+
81
+ CutlassProfiler(Options const &options);
82
+ ~CutlassProfiler();
83
+
84
+ /// Invokes profiling operations
85
+ int operator()();
86
+ };
87
+
88
+ /////////////////////////////////////////////////////////////////////////////////////////////////
89
+
90
+ } // namespace profiler
91
+ } // namespace cutlass
92
+
93
+ /////////////////////////////////////////////////////////////////////////////////////////////////
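A typical entry point wires these pieces together: parse the command line, build Options, and hand them to CutlassProfiler, whose operator() enumerates and profiles every registered operation. Sketch only; the Options(CommandLine) constructor is an assumption mirroring the per-category option structs declared in options.h:

    #include "cutlass/util/command_line.h"
    #include "cutlass/profiler/cutlass_profiler.h"

    int main(int argc, char const *argv[]) {
      cutlass::CommandLine cmdline(argc, argv);          // CUTLASS command-line parser
      cutlass::profiler::Options options(cmdline);       // assumption: Options is constructible from a CommandLine
      cutlass::profiler::CutlassProfiler profiler(options);
      return profiler();                                 // runs enumerate_() and profile_()
    }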
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/debug.h ADDED
@@ -0,0 +1,56 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Debug print macros for the CUTLASS profiler.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <iostream>
38
+
39
+ //#define report(x) { std::cout << "\033[31m" << __FILE__ << ":" << __LINE__ << " " << x << "\033[0m" << std::endl; }
40
+ //#define report(x) {}
41
+
42
+ // Enable/Disable Profiler debug prints
43
+ //#define DEBUG_PROFILER
44
+
45
+ //RED 31m // profiler prints debug messages in red
46
+ //YELLOW 33m // ir prints debug messages in yellow
47
+
48
+ #ifndef DEBUG_PROFILER
49
+ #define debugprof(...)
50
+ #else
51
+ #define debugprof(...) do { \
52
+ printf("\033[33m[DEBUG PROF] %s:%d | ", __FILE__, __LINE__); \
53
+ printf(__VA_ARGS__); \
54
+ printf("\033[0m\n"); \
55
+ } while (0)
56
+ #endif
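With DEBUG_PROFILER defined, debugprof behaves like printf with a colored file-and-line prefix; without it, the call compiles away entirely. A small sketch (the include path is assumed to match this header's install location):

    #define DEBUG_PROFILER                     // must be defined before the include to enable the macro
    #include "cutlass/profiler/debug.h"

    void report_iterations(int iterations) {
      debugprof("profiling with %d iterations", iterations);   // expands to a printf with a file:line prefix
    }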
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/device_context.h ADDED
@@ -0,0 +1,136 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Device memory allocation context for the CUTLASS profiler.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <map>
38
+ #include <string>
39
+
40
+
41
+ #include "cutlass/library/library.h"
42
+ #include "cutlass/library/util.h"
43
+
44
+ #include "options.h"
45
+ #include "device_allocation.h"
46
+
47
+ namespace cutlass {
48
+ namespace profiler {
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ /// Collection of allocations on the device
53
+ class DeviceContext {
54
+ public:
55
+
56
+ //
57
+ // Type definitions
58
+ //
59
+ using AllocationMap = std::map<std::string, DeviceAllocation *>;
60
+
61
+ private:
62
+ //
63
+ // Data members
64
+ //
65
+
66
+ /// Memory allocations that exist (owning)
67
+ DeviceAllocationList device_memory_;
68
+
69
+ /// Non-owning set of named allocations
70
+ AllocationMap allocations_;
71
+
72
+ public:
73
+
74
+ /// Allocates memory of a given type, capacity (elements), and name
75
+ DeviceAllocation *allocate_block(
76
+ Options const &options,
77
+ std::string const &name,
78
+ library::NumericTypeID type,
79
+ size_t capacity,
80
+ size_t device_index);
81
+
82
+ /// Allocates memory of a given type, capacity (elements), and name
83
+ DeviceAllocation *allocate_tensor(
84
+ Options const &options,
85
+ std::string const &name,
86
+ library::NumericTypeID type,
87
+ library::LayoutTypeID layout_id,
88
+ std::vector<int> const &extent,
89
+ std::vector<int64_t> const &stride,
90
+ int batch_count,
91
+ size_t device_index);
92
+
93
+ /// Allocates and initializes memory of a given type, capacity (elements), and name
94
+ DeviceAllocation *allocate_and_initialize_tensor(
95
+ Options const &options,
96
+ std::string const &name,
97
+ library::NumericTypeID type,
98
+ library::LayoutTypeID layout_id,
99
+ std::vector<int> const &extent,
100
+ std::vector<int64_t> const &stride,
101
+ int batch_count,
102
+ int seed_shift,
103
+ size_t device_index);
104
+
105
+ /// Allocates memory for sparse meta data
106
+ DeviceAllocation *allocate_and_initialize_sparsemeta_tensor(
107
+ Options const &options,
108
+ std::string const &name,
109
+ library::NumericTypeID type,
110
+ library::LayoutTypeID layout_id,
111
+ library::NumericTypeID type_a,
112
+ std::vector<int> const &extent,
113
+ std::vector<int64_t> const &stride,
114
+ int batch_count,
115
+ int seed_shift,
116
+ size_t device_index);
117
+
118
+ /// Clears named allocations (but does not necessarily free memory)
119
+ void clear();
120
+
121
+ /// Frees all device memory allocations
122
+ void free();
123
+
124
+ /// Gets the allocation by name
125
+ DeviceAllocation &at(std::string const &name);
126
+
127
+ size_t size() const;
128
+
129
+ AllocationMap::iterator begin();
130
+ AllocationMap::iterator end();
131
+ };
132
+
133
+ /////////////////////////////////////////////////////////////////////////////////////////////////
134
+
135
+ } // namespace profiler
136
+ } // namespace cutlass
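The allocation helpers above register each block under a name so that later stages can retrieve the same tensor with at(). A hypothetical sketch, assuming an options object built from the profiler's command line and a half-precision column-major 1024x512 matrix; the extent and stride values are illustrative only:

    cutlass::profiler::DeviceContext ctx;

    cutlass::profiler::DeviceAllocation *A = ctx.allocate_and_initialize_tensor(
      options, "A",
      cutlass::library::NumericTypeID::kF16,
      cutlass::library::LayoutTypeID::kColumnMajor,
      {1024, 512},          // extent
      {1024},               // stride (leading dimension)
      /*batch_count=*/1,
      /*seed_shift=*/0,
      /*device_index=*/0);

    cutlass::profiler::DeviceAllocation &same_block = ctx.at("A");   // lookup by name returns the same allocation
    ctx.free();                                                      // releases all device allocations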
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/enumerated_types.h ADDED
@@ -0,0 +1,169 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Enumerated types and string conversions used by the CUTLASS profiler.
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <string>
38
+ #include <vector>
39
+ #include <map>
40
+ #include <iostream>
41
+ #include "cutlass/library/library.h"
42
+
43
+ #define TRACE(x) { std::cout << __FILE__ << ":" << __LINE__ << " " << x << std::endl; }
44
+
45
+ namespace cutlass {
46
+ namespace profiler {
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ template <typename T>
51
+ T from_string(std::string const &);
52
+
53
+ /////////////////////////////////////////////////////////////////////////////////////////////////
54
+
55
+ /// Enumerated type describing how the performance testbench evaluates kernels.
56
+ enum class ExecutionMode {
57
+ kProfile, ///< regular verification and profiling
58
+ kDryRun, ///< no kernels are launched or workspaces allocated; used to assess what operators might be launched
59
+ kEnumerate, ///< no kernels launched or workspaces allocated; lists all operation kind and operations
60
+ kTrace, ///< executes a single device-side computation with no other kernel launches
61
+ kInvalid
62
+ };
63
+
64
+ /// Converts an ExecutionMode enumerant to a string
65
+ char const *to_string(ExecutionMode mode, bool pretty = false);
66
+
67
+ /// Parses an ExecutionMode enumerant from a string
68
+ template <>
69
+ ExecutionMode from_string<ExecutionMode>(std::string const &str);
70
+
71
+ /////////////////////////////////////////////////////////////////////////////////////////////////
72
+
73
+ /// Library algorithm mode
74
+ enum class AlgorithmMode {
75
+ kMatching, ///< compare against best matching algorithm
76
+ kBest, ///< evaluate all library algorithms and report best
77
+ kDefault, ///< use the library's default algorithm option
78
+ kInvalid
79
+ };
80
+
81
+ /// Converts an AlgorithmMode enumerant to a string
82
+ char const *to_string(AlgorithmMode mode, bool pretty = false);
83
+
84
+ /// Parses an AlgorithmMode enumerant from a string
85
+ template <>
86
+ AlgorithmMode from_string<AlgorithmMode>(std::string const &str);
87
+
88
+ /////////////////////////////////////////////////////////////////////////////////////////////////
89
+
90
+ /// Outcome of a performance test
91
+ enum class Disposition {
92
+ kPassed,
93
+ kFailed,
94
+ kNotRun,
95
+ kIncorrect,
96
+ kNotVerified,
97
+ kInvalidProblem,
98
+ kNotSupported,
99
+ kInvalid
100
+ };
101
+
102
+ /// Converts a Disposition enumerant to a string
103
+ char const *to_string(Disposition disposition, bool pretty = false);
104
+
105
+ /// Parses a Disposition enumerant from a string
106
+ template <>
107
+ Disposition from_string<Disposition>(std::string const &str);
108
+
109
+ /////////////////////////////////////////////////////////////////////////////////////////////////
110
+
111
+ /// Indicates when to save
112
+ enum class SaveWorkspace {
113
+ kNever,
114
+ kIncorrect,
115
+ kAlways,
116
+ kInvalid
117
+ };
118
+
119
+ /// Converts a SaveWorkspace enumerant to a string
120
+ char const *to_string(SaveWorkspace save_option, bool pretty = false);
121
+
122
+ /// Parses a SaveWorkspace enumerant from a string
123
+ template <>
124
+ SaveWorkspace from_string<SaveWorkspace>(std::string const &str);
125
+
126
+ /////////////////////////////////////////////////////////////////////////////////////////////////
127
+
128
+ /// Indicates the type of kernel argument
129
+ // An ArgumentTypeID may describe either a scalar value or a numeric type:
+ // 1) Scalar: the argument holds a value of a scalar type, e.g. a u32 problem dimension;
+ //    its C++ equivalent as "type name = initializer" is "u32 m = 32".
+ // 2) Numeric: the argument holds a NumericTypeID itself;
+ //    its C++ equivalent as "type name = initializer" is "NumericTypeID numeric_type = u32".
134
+ enum class ArgumentTypeID {
135
+ kScalar,
136
+ kInteger,
137
+ kTensor,
138
+ kBatchedTensor,
139
+ kStructure,
140
+ kEnumerated,
141
+ kInvalid
142
+ };
143
+
144
+ /// Converts an ArgumentTypeID enumerant to a string
145
+ char const *to_string(ArgumentTypeID type, bool pretty = false);
146
+
147
+ /// Parses an ArgumentTypeID enumerant from a string
148
+ template <>
149
+ ArgumentTypeID from_string<ArgumentTypeID>(std::string const &str);
150
+
151
+ /////////////////////////////////////////////////////////////////////////////////////////////////
152
+ // Profiler typedefs
153
+ using ProviderVector = std::vector<library::Provider>;
154
+ using DispositionMap = std::map<library::Provider, Disposition>;
155
+
156
+ /////////////////////////////////////////////////////////////////////////////////////////////////
157
+
158
+ // Print vector for the report
159
+ template <typename T>
160
+ std::ostream& operator<< (std::ostream& out, const std::vector<T>& v) {
161
+ for (size_t i = 0; i < v.size(); ++i) {
162
+ out << to_string(v[i], true) << (i + 1u != v.size() ? "," : "");
163
+ }
164
+ return out;
165
+ }
166
+ /////////////////////////////////////////////////////////////////////////////////////////////////
167
+
168
+ } // namespace profiler
169
+ } // namespace cutlass
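Each enumeration above is paired with to_string and a from_string specialization, so option parsing and report printing can round-trip through text. A sketch; the exact string spellings accepted by from_string are an assumption:

    #include <iostream>

    void print_mode() {
      using cutlass::profiler::ExecutionMode;
      ExecutionMode mode = cutlass::profiler::from_string<ExecutionMode>("profile");  // spelling assumed
      if (mode != ExecutionMode::kInvalid) {
        std::cout << cutlass::profiler::to_string(mode, /*pretty=*/true) << std::endl;
      }
    }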
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gemm_operation_profiler.h ADDED
@@ -0,0 +1,268 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Gemm Profiler
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <vector>
38
+ #include <string>
39
+ #include <memory>
40
+ #include <algorithm>
41
+ #include <unordered_map>
42
+
43
+ // CUTLASS Library includes
44
+ #include "cutlass/library/library.h"
45
+ #include "cutlass/library/util.h"
46
+ #include "cutlass/library/manifest.h"
47
+
48
+ // Profiler includes
49
+ #include "options.h"
50
+ #include "device_context.h"
51
+ #include "operation_profiler.h"
52
+ #include "performance_result.h"
53
+ #include "problem_space.h"
54
+ #include "reduction_operation_profiler.h"
55
+
56
+ /////////////////////////////////////////////////////////////////////////////////////////////////
57
+
58
+ namespace cutlass {
59
+ namespace profiler {
60
+
61
+ /////////////////////////////////////////////////////////////////////////////////////////////////
62
+
63
+ /// Operation profiler specialized for GEMM math functions
64
+ class GemmOperationProfiler : public OperationProfiler {
65
+ public:
66
+
67
+ /// Problem structure obtained from problem space
68
+ struct GemmProblem {
69
+
70
+ cutlass::library::GemmUniversalMode mode{library::GemmUniversalMode::kGemm};
71
+
72
+ int64_t m{16};
73
+ int64_t n{16};
74
+ int64_t k{16};
75
+
76
+ int64_t lda{0};
77
+ int64_t ldb{0};
78
+ int64_t ldc{0};
79
+ std::vector<uint8_t> alpha;
80
+ std::vector<uint8_t> beta;
81
+
82
+ cutlass::library::SplitKMode split_k_mode{library::SplitKMode::kNone};
83
+ int split_k_slices{1};
84
+ int batch_count{1};
85
+
86
+ cutlass::library::RasterOrder raster_order{cutlass::library::RasterOrder::kHeuristic};
87
+ int swizzle_size{1};
88
+
89
+ // gemm with parallel interleaved reduction
90
+ // gemm epilogue (alpha, beta) = (1.0, 0.0)
91
+ // reduction epilogue (alpha, beta) = (GemmProblem::alpha, GemmProblem::beta)
92
+ std::vector<uint8_t> alpha_one;
93
+ std::vector<uint8_t> beta_zero;
94
+
95
+ //
96
+ // Methods
97
+ //
98
+
99
+ /// Parses the problem
100
+ Status parse(
101
+ library::GemmDescription const &operation_desc,
102
+ ProblemSpace const &problem_space,
103
+ ProblemSpace::Problem const &problem);
104
+
105
+ /// Total number of bytes loaded
106
+ int64_t bytes(library::GemmDescription const &operation_desc) const;
107
+
108
+ /// Total number of flops computed
109
+ int64_t flops(library::GemmDescription const &operation_desc) const;
110
+
111
+ /// Initializes a performance result
112
+ void initialize_result(
113
+ PerformanceResult &result,
114
+ library::GemmDescription const &operation_desc,
115
+ ProblemSpace const &problem_space);
116
+ };
117
+
118
+ /// Workspace used
119
+ struct GemmWorkspace {
120
+
121
+ DeviceAllocation *A{nullptr};
122
+ DeviceAllocation *B{nullptr};
123
+ DeviceAllocation *C{nullptr};
124
+ DeviceAllocation *Computed{nullptr};
125
+ DeviceAllocation *Reference{nullptr};
126
+
127
+ /// Number of copies of the problem workspace which are visited sequentially during
128
+ /// profiling to avoid camping in the last level cache.
129
+ int problem_count{1};
130
+
131
+ library::GemmUniversalConfiguration configuration;
132
+ library::GemmUniversalArguments arguments;
133
+
134
+ /// Buffer used for the operation's host workspace
135
+ std::vector<uint8_t> host_workspace;
136
+
137
+ /// Buffer used for the operations' device workspace
138
+ DeviceAllocation device_workspace;
139
+
140
+ /// Library configuration and arguments for reduction operator
141
+ library::ReductionConfiguration reduction_configuration;
142
+ library::ReductionArguments reduction_arguments;
143
+
144
+ /// Buffer used for the cutlass reduction operations' host workspace
145
+ std::vector<uint8_t> reduction_host_workspace;
146
+ };
147
+
148
+ protected:
149
+
150
+ //
151
+ // Data members
152
+ //
153
+
154
+ /// GEMM problem obtained from problem space
155
+ GemmProblem problem_;
156
+
157
+ /// Device memory allocations
158
+ GemmWorkspace gemm_workspace_;
159
+
160
+ /// CUTLASS parallel reduction operation to follow this GEMM operation
161
+ library::Operation const *reduction_op_;
162
+
163
+ public:
164
+ //
165
+ // Methods
166
+ //
167
+
168
+ /// Ctor
169
+ GemmOperationProfiler(Options const &options);
170
+
171
+ /// Destructor
172
+ virtual ~GemmOperationProfiler();
173
+
174
+ GemmProblem const& problem() const { return problem_; }
175
+
176
+ /// Prints usage statement for the math function
177
+ virtual void print_usage(std::ostream &out) const;
178
+
179
+ /// Prints examples
180
+ virtual void print_examples(std::ostream &out) const;
181
+
182
+ /// Extracts the problem dimensions
183
+ virtual Status initialize_configuration(
184
+ Options const &options,
185
+ PerformanceReport &report,
186
+ DeviceContext &device_context,
187
+ library::Operation const *operation,
188
+ ProblemSpace const &problem_space,
189
+ ProblemSpace::Problem const &problem);
190
+
191
+ /// Initializes workspace
192
+ virtual Status initialize_workspace(
193
+ Options const &options,
194
+ PerformanceReport &report,
195
+ DeviceContext &device_context,
196
+ library::Operation const *operation,
197
+ ProblemSpace const &problem_space,
198
+ ProblemSpace::Problem const &problem);
199
+
200
+ /// Verifies CUTLASS against references
201
+ virtual bool verify_cutlass(
202
+ Options const &options,
203
+ PerformanceReport &report,
204
+ DeviceContext &device_context,
205
+ library::Operation const *operation,
206
+ ProblemSpace const &problem_space,
207
+ ProblemSpace::Problem const &problem);
208
+
209
+ /// Measures performance results
210
+ virtual bool profile(
211
+ Options const &options,
212
+ PerformanceReport &report,
213
+ DeviceContext &device_context,
214
+ library::Operation const *operation,
215
+ ProblemSpace const &problem_space,
216
+ ProblemSpace::Problem const &problem);
217
+
218
+ protected:
219
+
220
+ /// Initializes the performance result
221
+ void initialize_result_(
222
+ PerformanceResult &result,
223
+ Options const &options,
224
+ library::GemmDescription const &operation_desc,
225
+ ProblemSpace const &problem_space);
226
+
227
+ /// Verifies CUTLASS against references
228
+ bool verify_with_cublas_(
229
+ Options const &options,
230
+ PerformanceReport &report,
231
+ DeviceContext &device_context,
232
+ library::Operation const *operation,
233
+ ProblemSpace const &problem_space,
234
+ ProblemSpace::Problem const &problem);
235
+
236
+ /// Verifies CUTLASS against host and device references
237
+ bool verify_with_reference_(
238
+ Options const &options,
239
+ PerformanceReport &report,
240
+ DeviceContext &device_context,
241
+ library::Operation const *operation,
242
+ ProblemSpace const &problem_space,
243
+ ProblemSpace::Problem const &problem,
244
+ cutlass::library::NumericTypeID element_A,
245
+ cutlass::library::NumericTypeID element_B);
246
+
247
+ /// Method to profile a CUTLASS Operation
248
+ Status profile_cutlass_(
249
+ double &runtime,
250
+ Options const &options,
251
+ library::Operation const *operation,
252
+ void *arguments,
253
+ void *host_workspace,
254
+ void *device_workspace);
255
+
256
+ /// Initialize reduction problem dimensions and library::Operation
257
+ bool initialize_reduction_configuration_(
258
+ library::Operation const *operation,
259
+ ProblemSpace::Problem const &problem);
260
+ };
261
+
262
+ /////////////////////////////////////////////////////////////////////////////////////////////////
263
+
264
+ } // namespace profiler
265
+ } // namespace cutlass
266
+
267
+ /////////////////////////////////////////////////////////////////////////////////////////////////
268
+
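GemmProblem::flops() and bytes() feed the throughput columns of the performance report. As an illustration of the counting rule only (the library's implementation also covers complex-valued, split-K, and batched variants), a real-valued GEMM performs one multiply-add per inner-product term:

    #include <cstdint>

    // Illustrative only: approximate FLOP count for a real-valued batched GEMM.
    int64_t gemm_flops(int64_t m, int64_t n, int64_t k, int64_t batch_count) {
      return 2 * m * n * k * batch_count;   // one multiply and one add per term of each dot product
    }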
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/gpu_timer.h ADDED
@@ -0,0 +1,72 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Defines a CUDA event-based GPU timer
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <cuda_runtime.h>
38
+ #include "cutlass/cutlass.h"
39
+
40
+ namespace cutlass {
41
+ namespace profiler {
42
+
43
+ /////////////////////////////////////////////////////////////////////////////////////////////////
44
+
45
+ struct GpuTimer {
46
+
47
+ cudaEvent_t events[2];
48
+
49
+ //
50
+ // Methods
51
+ //
52
+
53
+ GpuTimer();
54
+ ~GpuTimer();
55
+
56
+ /// Records a start event in the stream
57
+ void start(cudaStream_t stream = nullptr);
58
+
59
+ /// Records a stop event in the stream
60
+ void stop(cudaStream_t stream = nullptr);
61
+
62
+ /// Records a stop event in the stream and synchronizes on the stream
63
+ void stop_and_wait(cudaStream_t stream = nullptr);
64
+
65
+ /// Returns the duration in milliseconds
66
+ double duration(int iterations = 1) const;
67
+ };
68
+
69
+ /////////////////////////////////////////////////////////////////////////////////////////////////
70
+
71
+ } // namespace profiler
72
+ } // namespace cutlass
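GpuTimer brackets a stream with two CUDA events, and duration() reports elapsed milliseconds; the iterations argument suggests it averages over repeated launches (an assumption based on the default parameter). A sketch of the intended timing loop, with the measured kernel left as a placeholder:

    cutlass::profiler::GpuTimer timer;
    int const iterations = 100;

    timer.start();
    for (int i = 0; i < iterations; ++i) {
      // launch the workload being measured here
    }
    timer.stop_and_wait();                        // records the stop event and synchronizes the stream

    double avg_ms = timer.duration(iterations);   // milliseconds, averaged per iteration (assumed)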
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/options.h ADDED
@@ -0,0 +1,345 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Command line options for performance test program
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <string>
38
+ #include <vector>
39
+ #include <map>
40
+
41
+ #include <cuda_runtime.h>
42
+
43
+ #include "cutlass/util/command_line.h"
44
+ #include "cutlass/util/distribution.h"
45
+ #include "cutlass/library/library.h"
46
+
47
+ #include "enumerated_types.h"
48
+
49
+ namespace cutlass {
50
+ namespace profiler {
51
+
52
+ /////////////////////////////////////////////////////////////////////////////////////////////////
53
+
54
+ /// Global options
55
+ class Options {
56
+ public:
57
+
58
+ /// cuBLAS and cuDNN options
59
+ struct Library {
60
+
61
+ //
62
+ // Data members
63
+ //
64
+
65
+ /// Algorithm mode
66
+ AlgorithmMode algorithm_mode;
67
+
68
+ /// Algorithm enumerants
69
+ std::vector<int> algorithms;
70
+
71
+ //
72
+ // Methods
73
+ //
74
+
75
+ explicit Library(CommandLine const &cmdline);
76
+
77
+ void print_usage(std::ostream &out) const;
78
+ void print_options(std::ostream &out, int indent = 0) const;
79
+ };
80
+
81
+ /// Options related to the selected device
82
+ struct Device {
83
+
84
+ /// Device ID
85
+ std::vector<int> devices;
86
+
87
+ /// Number of total devices
88
+ /// This is not set by the user; it is set automatically
89
+ int num_devices;
90
+
91
+ /// CUDA Device properties
92
+ std::vector<cudaDeviceProp> properties;
93
+
94
+ /// Total memory allocation on each device
95
+ size_t maximum_capacity;
96
+
97
+ //
98
+ // Methods
99
+ //
100
+
101
+ explicit Device(CommandLine const &cmdline);
102
+
103
+ void print_usage(std::ostream &out) const;
104
+ void print_options(std::ostream &out, int indent = 0) const;
105
+ void print_device_info(std::ostream &out) const;
106
+
107
+ /// Returns the device ID from a device index
108
+ int device_id(size_t device_index) const;
109
+
110
+ /// Returns the compute capability of the listed devices (e.g. 61, 60, 70, 75)
111
+ int compute_capability(int device_index) const;
112
+ };
113
+
114
+ /// Options related to initializing input tensors
115
+ struct Initialization {
116
+
117
+ /// If true, data is initialized randomly. If false, no initialization is performed after
118
+ /// allocating tensors.
119
+ bool enabled;
120
+
121
+ /// If true, data distribution is set by the user and is not allowed to change
122
+ /// If false, data distribution is allowed to change based on element_type (library::NumericTypeID)
123
+ bool fix_data_distribution;
124
+
125
+ /// Data distribution for input tensors
126
+ Distribution data_distribution;
127
+
128
+ /// Source of random tensor elements
129
+ library::Provider provider;
130
+
131
+ /// Random number generator seed.
132
+ int seed;
133
+
134
+ //
135
+ // Methods
136
+ //
137
+
138
+ explicit Initialization(CommandLine const &cmdline);
139
+
140
+ void print_usage(std::ostream &out) const;
141
+ void print_options(std::ostream &out, int indent = 0) const;
142
+
143
+ /// Helper to parse a Distribution object from the command line parser
144
+ static void get_distribution(
145
+ cutlass::CommandLine const &args,
146
+ std::string const &arg,
147
+ cutlass::Distribution &dist);
148
+ };
149
+
150
+ /// Options related to verification of the result
151
+ struct Verification {
152
+
153
+ //
154
+ // Data members
155
+ //
156
+
157
+ /// If true, kernels are verified before they are profiled
158
+ bool enabled;
159
+
160
+ /// If true, causes profiler to return an error code if no reference check is run.
161
+ /// Only valid when verification is enabled.
162
+ bool required;
163
+
164
+ /// Relative error threshold - zero to require bit-level consistency
165
+ double epsilon;
166
+
167
+ /// Values smaller than this are assumed to be zero
168
+ double nonzero_floor;
169
+
170
+ /// List of providers used to verify each result
171
+ ProviderVector providers;
172
+
173
+ /// Indicates when to save the workspace
174
+ SaveWorkspace save_workspace;
175
+
176
+ //
177
+ // Methods
178
+ //
179
+
180
+ explicit Verification(CommandLine const &cmdline);
181
+
182
+ void print_usage(std::ostream &out) const;
183
+ void print_options(std::ostream &out, int indent = 0) const;
184
+
185
+ /// Returns true if a provider is enabled
186
+ bool provider_enabled(library::Provider provider) const;
187
+
188
+ /// Returns the index of a provider if it is enabled
189
+ size_t index(library::Provider provider) const;
190
+ };
191
+
192
+ /// Options related to profiling
193
+ struct Profiling {
194
+
195
+ /// Number of workspaces to rotate through to avoid cache-resident working sets
196
+ int workspace_count{0};
197
+
198
+ /// Number of iterations to warmup each kernel prior to profiling
199
+ int warmup_iterations{10};
200
+
201
+ /// Number of iterations to profile each kernel - if 0, kernels are launched up to the profiling duration
202
+ int iterations{100};
203
+
204
+ /// Number of milliseconds to sleep between profiling periods
205
+ int sleep_duration{50};
206
+
207
+ /// If true, profiling is actually conducted.
208
+ bool enabled{true};
209
+
210
+ /// If true, profiling returns an error code if no kernels are found to match the filters.
211
+ bool error_on_no_match{false};
212
+
213
+ /// If true, profiling returns an error code if no kernels are profiled
214
+ // Sometimes a kernel matches but fails to profile (e.g. a can_implement() error)
215
+ bool error_if_nothing_is_profiled{false};
216
+
217
+ /// List of providers of each functionality to be profiled
218
+ ProviderVector providers;
219
+
220
+ //
221
+ // Methods
222
+ //
223
+
224
+ explicit Profiling(CommandLine const &cmdline);
225
+
226
+ void print_usage(std::ostream &out) const;
227
+ void print_options(std::ostream &out, int indent = 0) const;
228
+
229
+ /// Returns true if a provider is enabled
230
+ bool provider_enabled(library::Provider provider) const;
231
+
232
+ /// Returns the index of a provider if it is enabled
233
+ size_t index(library::Provider provider) const;
234
+ };
235
+
236
+ /// Options related to reporting
237
+ struct Report {
238
+
239
+ /// If true, result is appended to possibly existing file
240
+ bool append;
241
+
242
+ /// Path to a file containing results
243
+ std::string output_path;
244
+
245
+ /// Path to a file containing junit xml results
246
+ std::string junit_output_path;
247
+
248
+ /// Sequence of tags to attach to each result
249
+ std::vector<std::pair<std::string, std::string>> pivot_tags;
250
+
251
+ /// If true, reports status of all kernels including those that were
252
+ /// not run for the given arguments
253
+ bool report_not_run;
254
+
255
+ /// Prints human-readable text to stdout. If false, nothing is written to stdout
256
+ bool verbose;
257
+
258
+ /// If true, sort results (currently by flops-per-byte)
259
+ bool sort_results;
260
+
261
+ /// Prints the name of the kernel being profiled before running the kernel.
262
+ /// This is useful for determining which kernel is causing a run of the profiler to hang
263
+ bool print_kernel_before_running;
264
+
265
+ //
266
+ // Methods
267
+ //
268
+
269
+ explicit Report(CommandLine const &cmdline);
270
+
271
+ void print_usage(std::ostream &out) const;
272
+ void print_options(std::ostream &out, int indent = 0) const;
273
+ };
274
+
275
+ /// Options related to printing usage and version information
276
+ struct About {
277
+
278
+ /// If true, usage is printed and the program ends.
279
+ bool help;
280
+
281
+ /// Prints version string
282
+ bool version;
283
+
284
+ /// Print information about devices
285
+ bool device_info;
286
+
287
+ //
288
+ // Methods
289
+ //
290
+
291
+ explicit About(CommandLine const &cmdline);
292
+
293
+ void print_usage(std::ostream &out) const;
294
+ void print_options(std::ostream &out, int indent = 0) const;
295
+
296
+ static void print_version(std::ostream &out);
297
+ };
298
+
299
+ public:
300
+
301
+ //
302
+ // Data members
303
+ //
304
+
305
+ /// Top-level execution mode
306
+ ExecutionMode execution_mode;
307
+
308
+ /// Name of math function to profile
309
+ library::OperationKind operation_kind;
310
+
311
+ /// Vector of operation name substrings
312
+ std::vector<std::string> operation_names;
313
+
314
+ /// Vector of operation name substrings
315
+ std::vector<std::string> excluded_operation_names;
316
+
317
+
318
+ //
319
+ // Detailed configuration options
320
+ //
321
+
322
+ /// Configuration
323
+ CommandLine cmdline;
324
+ Device device;
325
+ Initialization initialization;
326
+ Library library;
327
+ Verification verification;
328
+ Profiling profiling;
329
+ Report report;
330
+ About about;
331
+
332
+ public:
333
+
334
+ explicit Options(CommandLine const &cmdline);
335
+
336
+ void print_usage(std::ostream &out) const;
337
+ void print_options(std::ostream &out) const;
338
+
339
+ static std::string indent_str(int indent);
340
+ };
341
+
342
+ /////////////////////////////////////////////////////////////////////////////////////////////////
343
+
344
+ } // namespace profiler
345
+ } // namespace cutlass
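Options and each nested option group take a cutlass::CommandLine in their explicit constructors, so a driver builds them directly from argv. A minimal sketch (the main() wrapper is illustrative, not part of this header):

#include <iostream>
#include "cutlass/util/command_line.h"
#include "cutlass/profiler/options.h"

int main(int argc, char const **argv) {
  cutlass::CommandLine cmdline(argc, argv);      // parse raw command-line arguments
  cutlass::profiler::Options options(cmdline);   // populate Device, Library, Verification, ... groups

  if (options.about.help) {
    options.print_usage(std::cout);              // each group contributes its own usage text
    return 0;
  }

  options.print_options(std::cout);              // echo the effective configuration
  return 0;
}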
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_report.h ADDED
@@ -0,0 +1,127 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Class performing output during profiling
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <vector>
38
+ #include <fstream>
39
+
40
+ // CUTLASS Profiler includes
41
+ #include "options.h"
42
+ #include "enumerated_types.h"
43
+ #include "performance_result.h"
44
+
45
+ // CUTLASS Library includes
46
+ #include "cutlass/library/library.h"
47
+
48
+ namespace cutlass {
49
+ namespace profiler {
50
+
51
+ /////////////////////////////////////////////////////////////////////////////////////////////////
52
+
53
+ class PerformanceReport {
54
+ private:
55
+
56
+ /// Reference to options
57
+ Options const &options_;
58
+
59
+ /// Operation kind
60
+ library::OperationKind op_kind_;
61
+
62
+ /// Operation file name containing performance report of op_kind
63
+ std::string op_file_name_;
64
+
65
+ /// Output file containing results
66
+ std::ofstream output_file_;
67
+
68
+ /// Operation file name containing junit performance report of op_kind
69
+ std::string op_junit_file_name_;
70
+
71
+ /// Output file containing junit results
72
+ std::ofstream junit_output_file_;
73
+
74
+ /// Flag indicating the performance report is valid
75
+ bool good_;
76
+
77
+ /// Vector of argument names
78
+ std::vector<std::string> argument_names_;
79
+
80
+ /// Counter uniquely identifying problem within the report
81
+ size_t problem_index_;
82
+
83
+ /// Collection of all results
84
+ PerformanceResultVector concatenated_results_;
85
+
86
+ public:
87
+
88
+ PerformanceReport(Options const &options, std::vector<std::string> const &argument_names, library::OperationKind const &op_kind);
89
+ ~PerformanceReport();
90
+
91
+ bool good() const { return good_; }
92
+
93
+ void next_problem();
94
+ void append_result(PerformanceResult result);
95
+ void sort_results(PerformanceResultVector &results);
96
+ void append_results(PerformanceResultVector const &results);
97
+
98
+ public:
99
+
100
+ /// Prints the CSV header
101
+ std::ostream & print_csv_header_(std::ostream &out);
102
+
103
+ /// Prints the CSV
104
+ std::ostream & print_result_csv_(std::ostream &out, PerformanceResult const &result);
105
+
106
+ /// @defgroup jUnit Result Generation
107
+ /// Functions related to generation of the jUnit results
108
+ /// @{
109
+
110
+ std::ostream & print_junit_header_(std::ostream &out);
111
+ std::ostream & print_junit_result_(std::ostream &out, PerformanceResult const &result);
112
+ std::ostream & print_junit_footer_(std::ostream &out);
113
+
114
+ /// @}
115
+
116
+ /// Prints the result in human readable form
117
+ std::ostream & print_result_pretty_(
118
+ std::ostream &out,
119
+ PerformanceResult const &result,
120
+ bool use_shell_coloring = true);
121
+ };
122
+
123
+ /////////////////////////////////////////////////////////////////////////////////////////////////
124
+
125
+ } // namespace profiler
126
+ } // namespace cutlass
127
+
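PerformanceReport is driven one problem at a time through append_result() and next_problem(), with output routed according to Options::Report. A rough sketch of that flow using only members declared above; the argument names and result values are made-up examples:

#include <cstdint>
#include <string>
#include <vector>
#include "cutlass/profiler/performance_report.h"

void emit_example_report(cutlass::profiler::Options const &options) {
  std::vector<std::string> argument_names = {"m", "n", "k"};   // hypothetical argument columns

  cutlass::profiler::PerformanceReport report(
    options, argument_names, cutlass::library::OperationKind::kGemm);

  cutlass::profiler::PerformanceResult result;
  result.op_kind = cutlass::library::OperationKind::kGemm;
  result.operation_name = "example_gemm";                      // made-up kernel name
  result.arguments = {{"m", "1024"}, {"n", "1024"}, {"k", "1024"}};
  result.runtime = 0.5;                                        // average runtime in ms
  result.flops = int64_t(2) * 1024 * 1024 * 1024;              // 2*M*N*K for a 1024^3 GEMM

  report.append_result(result);   // written to CSV/junit streams per Options::Report
  report.next_problem();          // advance the problem counter before the next result
}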
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/performance_result.h ADDED
@@ -0,0 +1,128 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Defines the performance result structure reported by the profiler
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <vector>
38
+
39
+ #include "cutlass/cutlass.h"
40
+
41
+ // CUTLASS Profiler includes
42
+ #include "enumerated_types.h"
43
+
44
+ // CUTLASS Library includes
45
+ #include "cutlass/library/library.h"
46
+
47
+ namespace cutlass {
48
+ namespace profiler {
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ /// Performance result object
53
+ struct PerformanceResult {
54
+
55
+ /// Index of problem
56
+ size_t problem_index;
57
+
58
+ /// library::Provider
59
+ library::Provider provider;
60
+
61
+ /// Operation kind
62
+ library::OperationKind op_kind;
63
+
64
+ /// CUTLASS status result from kernels (success or failure)
65
+ // Status does not carry information on verification
66
+ Status status;
67
+
68
+ /// Outcome of verification (worst case verification result)
69
+ Disposition disposition;
70
+
71
+ /// Outcome of verification (all verification results)
72
+ DispositionMap verification_map;
73
+
74
+ /// Operation name
75
+ std::string operation_name;
76
+
77
+ /// Stringified vector of argument values
78
+ std::vector<std::pair<std::string, std::string> > arguments;
79
+
80
+ /// Number of bytes read or written
81
+ int64_t bytes;
82
+
83
+ /// Number of DL flops performed by the math function
84
+ int64_t flops;
85
+
86
+ /// Average runtime in ms
87
+ double runtime;
88
+
89
+ //
90
+ // Members
91
+ //
92
+
93
+ /// Ctor
94
+ PerformanceResult():
95
+ problem_index(0),
96
+ op_kind(library::OperationKind::kInvalid),
97
+ provider(library::Provider::kInvalid),
98
+ disposition(Disposition::kNotRun),
99
+ status(Status::kInvalid),
100
+ bytes(0),
101
+ flops(0),
102
+ runtime(0)
103
+ { }
104
+
105
+ /// Returns true if the runtime is valid
106
+ bool good() const {
107
+ return runtime > 0;
108
+ }
109
+
110
+ /// Math throughput in units of GFLOP/s
111
+ double gflops_per_sec() const {
112
+ return double(flops) / runtime / 1.0e6;
113
+ }
114
+
115
+ /// memory bandwidth in units of GiB/s
116
+ double gbytes_per_sec() const {
117
+ return double(bytes) / double(1 << 30) / runtime * 1000.0;
118
+ }
119
+
120
+ };
121
+
122
+ using PerformanceResultVector = std::vector<PerformanceResult>;
123
+
124
+ /////////////////////////////////////////////////////////////////////////////////////////////////
125
+
126
+ } // namespace profiler
127
+ } // namespace cutlass
128
+
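The two throughput helpers above fix the profiler's unit conventions: runtime is stored in milliseconds, so GFLOP/s is flops / (runtime * 1.0e6) and GiB/s divides bytes by 2^30 and rescales milliseconds to seconds. A small standalone sketch of the same arithmetic with made-up figures:

#include <cstdint>
#include <cstdio>

int main() {
  // Example figures only: a 4096^3 GEMM (2*M*N*K flops) that averaged 10 ms per run
  int64_t flops   = int64_t(2) * 4096 * 4096 * 4096;
  int64_t bytes   = int64_t(3) * 4096 * 4096 * sizeof(float);   // rough A+B+C traffic
  double  runtime = 10.0;                                       // milliseconds

  // Same expressions as PerformanceResult::gflops_per_sec() and gbytes_per_sec()
  double gflops = double(flops) / runtime / 1.0e6;
  double gibps  = double(bytes) / double(1 << 30) / runtime * 1000.0;

  std::printf("%.1f GFLOP/s, %.2f GiB/s\n", gflops, gibps);
  return 0;
}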
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_2k_operation_profiler.h ADDED
@@ -0,0 +1,229 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Defines a math function
33
+
34
+
35
+ */
36
+
37
+ #pragma once
38
+
39
+ #include <vector>
40
+ #include <string>
41
+ #include <memory>
42
+ #include <algorithm>
43
+ #include <unordered_map>
44
+
45
+ // CUTLASS Library includes
46
+ #include "cutlass/blas3.h"
47
+ #include "cutlass/library/library.h"
48
+ #include "cutlass/library/util.h"
49
+ #include "cutlass/library/manifest.h"
50
+
51
+ // Profiler includes
52
+ #include "options.h"
53
+ #include "device_context.h"
54
+ #include "operation_profiler.h"
55
+ #include "performance_result.h"
56
+ #include "problem_space.h"
57
+
58
+ /////////////////////////////////////////////////////////////////////////////////////////////////
59
+
60
+ namespace cutlass {
61
+ namespace profiler {
62
+
63
+ /////////////////////////////////////////////////////////////////////////////////////////////////
64
+
65
+
66
+ /// Operation profiler for Rank-2K update operations
67
+ class Rank2KOperationProfiler : public OperationProfiler {
68
+ public:
69
+
70
+ /// Problem structure obtained from problem space
71
+ struct RankKProblem {
72
+ int64_t n;
73
+ int64_t k;
74
+ int64_t lda;
75
+ int64_t ldb;
76
+ int64_t ldc;
77
+ FillMode fill_mode;
78
+ BlasMode blas_mode;
79
+ std::vector<uint8_t> alpha;
80
+ std::vector<uint8_t> beta;
81
+ int64_t split_k_slices;
82
+ int64_t batch_count;
83
+
84
+ //
85
+ // Methods
86
+ //
87
+
88
+ RankKProblem():
89
+ n(16), k(16), lda(0), ldb(0), ldc(0),
90
+ fill_mode(FillMode::kInvalid), blas_mode(BlasMode::kInvalid),
91
+ split_k_slices(1), batch_count(1) { }
92
+
93
+ /// Parses the problem
94
+ Status parse(
95
+ library::RankKDescription const &operation_desc,
96
+ ProblemSpace const &problem_space,
97
+ ProblemSpace::Problem const &problem);
98
+
99
+ /// Total number of bytes loaded
100
+ int64_t bytes(library::RankKDescription const &operation_desc) const;
101
+
102
+ /// Total number of flops computed
103
+ int64_t flops(library::RankKDescription const &operation_desc) const;
104
+
105
+ /// Initializes a performance result
106
+ void initialize_result(
107
+ PerformanceResult &result,
108
+ library::RankKDescription const &operation_desc,
109
+ ProblemSpace const &problem_space);
110
+ };
111
+
112
+ /// Workspace used
113
+ struct RankKWorkspace {
114
+
115
+ DeviceAllocation *A;
116
+ DeviceAllocation *B;
117
+ DeviceAllocation *C;
118
+ DeviceAllocation *Computed;
119
+ DeviceAllocation *Reference;
120
+
121
+ library::RankKConfiguration configuration;
122
+ library::RankKArguments arguments;
123
+
124
+ /// Buffer used for the operation's host workspace
125
+ std::vector<uint8_t> host_workspace;
126
+
127
+ /// Buffer used for the operations' device workspace
128
+ DeviceAllocation device_workspace;
129
+
130
+ //
131
+ // Methods
132
+ //
133
+
134
+ RankKWorkspace():
135
+ A(nullptr), B(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) { }
136
+ };
137
+
138
+ protected:
139
+
140
+ //
141
+ // Data members
142
+ //
143
+
144
+ /// GEMM problem obtained from problem space
145
+ RankKProblem problem_;
146
+
147
+ /// Device memory allocations
148
+ RankKWorkspace rank_k_workspace_;
149
+
150
+
151
+ public:
152
+ //
153
+ // Methods
154
+ //
155
+
156
+ /// Ctor
157
+ Rank2KOperationProfiler(Options const &options);
158
+
159
+ /// Destructor
160
+ virtual ~Rank2KOperationProfiler();
161
+
162
+ /// Prints usage statement for the math function
163
+ virtual void print_usage(std::ostream &out) const;
164
+
165
+ /// Prints examples
166
+ virtual void print_examples(std::ostream &out) const;
167
+
168
+ /// Extracts the problem dimensions
169
+ virtual Status initialize_configuration(
170
+ Options const &options,
171
+ PerformanceReport &report,
172
+ DeviceContext &device_context,
173
+ library::Operation const *operation,
174
+ ProblemSpace const &problem_space,
175
+ ProblemSpace::Problem const &problem);
176
+
177
+ /// Initializes workspace
178
+ virtual Status initialize_workspace(
179
+ Options const &options,
180
+ PerformanceReport &report,
181
+ DeviceContext &device_context,
182
+ library::Operation const *operation,
183
+ ProblemSpace const &problem_space,
184
+ ProblemSpace::Problem const &problem);
185
+
186
+ /// Verifies CUTLASS against references
187
+ virtual bool verify_cutlass(
188
+ Options const &options,
189
+ PerformanceReport &report,
190
+ DeviceContext &device_context,
191
+ library::Operation const *operation,
192
+ ProblemSpace const &problem_space,
193
+ ProblemSpace::Problem const &problem);
194
+
195
+ /// Measures performance results
196
+ virtual bool profile(
197
+ Options const &options,
198
+ PerformanceReport &report,
199
+ DeviceContext &device_context,
200
+ library::Operation const *operation,
201
+ ProblemSpace const &problem_space,
202
+ ProblemSpace::Problem const &problem);
203
+
204
+ protected:
205
+
206
+ /// Initializes the performance result
207
+ void initialize_result_(
208
+ PerformanceResult &result,
209
+ Options const &options,
210
+ library::RankKDescription const &operation_desc,
211
+ ProblemSpace const &problem_space);
212
+
213
+ /// Verifies CUTLASS against references
214
+ bool verify_with_cublas_(
215
+ Options const &options,
216
+ PerformanceReport &report,
217
+ DeviceContext &device_context,
218
+ library::Operation const *operation,
219
+ ProblemSpace const &problem_space,
220
+ ProblemSpace::Problem const &problem);
221
+
222
+ };
223
+
224
+ /////////////////////////////////////////////////////////////////////////////////////////////////
225
+
226
+ } // namespace profiler
227
+ } // namespace cutlass
228
+
229
+ /////////////////////////////////////////////////////////////////////////////////////////////////
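Rank2KOperationProfiler, like the Rank-K, sparse GEMM, and TRMM profilers declared in the following headers, overrides the same OperationProfiler virtual methods, and a driver is expected to walk them in order. A schematic sketch of that call sequence; the wrapper function and its early returns are assumptions for illustration, not code from this commit:

#include "cutlass/profiler/rank_2k_operation_profiler.h"

cutlass::Status profile_one_kernel(
  cutlass::profiler::Options const &options,
  cutlass::profiler::PerformanceReport &report,
  cutlass::profiler::DeviceContext &device_context,
  cutlass::library::Operation const *operation,
  cutlass::profiler::ProblemSpace const &problem_space,
  cutlass::profiler::ProblemSpace::Problem const &problem) {

  cutlass::profiler::Rank2KOperationProfiler profiler(options);

  // 1. Extract the problem dimensions (n, k, leading dimensions, fill and blas modes).
  cutlass::Status status = profiler.initialize_configuration(
    options, report, device_context, operation, problem_space, problem);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // 2. Allocate operand tensors and host/device workspaces.
  status = profiler.initialize_workspace(
    options, report, device_context, operation, problem_space, problem);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // 3. Optionally verify against reference providers, then measure performance.
  if (profiler.verify_cutlass(options, report, device_context, operation, problem_space, problem)) {
    profiler.profile(options, report, device_context, operation, problem_space, problem);
  }

  return cutlass::Status::kSuccess;
}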
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/rank_k_operation_profiler.h ADDED
@@ -0,0 +1,227 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Defines a math function
33
+
34
+
35
+ */
36
+
37
+ #pragma once
38
+
39
+ #include <vector>
40
+ #include <string>
41
+ #include <memory>
42
+ #include <algorithm>
43
+ #include <unordered_map>
44
+
45
+ // CUTLASS Library includes
46
+ #include "cutlass/blas3.h"
47
+ #include "cutlass/library/library.h"
48
+ #include "cutlass/library/util.h"
49
+ #include "cutlass/library/manifest.h"
50
+
51
+ // Profiler includes
52
+ #include "options.h"
53
+ #include "device_context.h"
54
+ #include "operation_profiler.h"
55
+ #include "performance_result.h"
56
+ #include "problem_space.h"
57
+
58
+ /////////////////////////////////////////////////////////////////////////////////////////////////
59
+
60
+ namespace cutlass {
61
+ namespace profiler {
62
+
63
+ /////////////////////////////////////////////////////////////////////////////////////////////////
64
+
65
+
66
+ /// Operation profiler for Rank-K update operations
67
+ class RankKOperationProfiler : public OperationProfiler {
68
+ public:
69
+
70
+ /// Problem structure obtained from problem space
71
+ struct RankKProblem {
72
+ int64_t n;
73
+ int64_t k;
74
+ int64_t lda;
75
+ int64_t ldc;
76
+ FillMode fill_mode;
77
+ BlasMode blas_mode;
78
+ std::vector<uint8_t> alpha;
79
+ std::vector<uint8_t> beta;
80
+ int64_t split_k_slices;
81
+ int64_t batch_count;
82
+
83
+ //
84
+ // Methods
85
+ //
86
+
87
+ RankKProblem():
88
+ n(16), k(16), lda(0), ldc(0),
89
+ fill_mode(FillMode::kInvalid), blas_mode(BlasMode::kInvalid),
90
+ split_k_slices(1), batch_count(1) { }
91
+
92
+ /// Parses the problem
93
+ Status parse(
94
+ library::RankKDescription const &operation_desc,
95
+ ProblemSpace const &problem_space,
96
+ ProblemSpace::Problem const &problem);
97
+
98
+ /// Total number of bytes loaded
99
+ int64_t bytes(library::RankKDescription const &operation_desc) const;
100
+
101
+ /// Total number of flops computed
102
+ int64_t flops(library::RankKDescription const &operation_desc) const;
103
+
104
+ /// Initializes a performance result
105
+ void initialize_result(
106
+ PerformanceResult &result,
107
+ library::RankKDescription const &operation_desc,
108
+ ProblemSpace const &problem_space);
109
+ };
110
+
111
+ /// Workspace used
112
+ struct RankKWorkspace {
113
+
114
+ DeviceAllocation *A;
115
+ DeviceAllocation *C;
116
+ DeviceAllocation *Computed;
117
+ DeviceAllocation *Reference;
118
+
119
+ library::RankKConfiguration configuration;
120
+ library::RankKArguments arguments;
121
+
122
+ /// Buffer used for the operation's host workspace
123
+ std::vector<uint8_t> host_workspace;
124
+
125
+ /// Buffer used for the operations' device workspace
126
+ DeviceAllocation device_workspace;
127
+
128
+ //
129
+ // Methods
130
+ //
131
+
132
+ RankKWorkspace():
133
+ A(nullptr), C(nullptr), Computed(nullptr), Reference(nullptr) { }
134
+ };
135
+
136
+ protected:
137
+
138
+ //
139
+ // Data members
140
+ //
141
+
142
+ /// GEMM problem obtained from problem space
143
+ RankKProblem problem_;
144
+
145
+ /// Device memory allocations
146
+ RankKWorkspace rank_k_workspace_;
147
+
148
+
149
+ public:
150
+ //
151
+ // Methods
152
+ //
153
+
154
+ /// Ctor
155
+ RankKOperationProfiler(Options const &options);
156
+
157
+ /// Destructor
158
+ virtual ~RankKOperationProfiler();
159
+
160
+ /// Prints usage statement for the math function
161
+ virtual void print_usage(std::ostream &out) const;
162
+
163
+ /// Prints examples
164
+ virtual void print_examples(std::ostream &out) const;
165
+
166
+ /// Extracts the problem dimensions
167
+ virtual Status initialize_configuration(
168
+ Options const &options,
169
+ PerformanceReport &report,
170
+ DeviceContext &device_context,
171
+ library::Operation const *operation,
172
+ ProblemSpace const &problem_space,
173
+ ProblemSpace::Problem const &problem);
174
+
175
+ /// Initializes workspace
176
+ virtual Status initialize_workspace(
177
+ Options const &options,
178
+ PerformanceReport &report,
179
+ DeviceContext &device_context,
180
+ library::Operation const *operation,
181
+ ProblemSpace const &problem_space,
182
+ ProblemSpace::Problem const &problem);
183
+
184
+ /// Verifies CUTLASS against references
185
+ virtual bool verify_cutlass(
186
+ Options const &options,
187
+ PerformanceReport &report,
188
+ DeviceContext &device_context,
189
+ library::Operation const *operation,
190
+ ProblemSpace const &problem_space,
191
+ ProblemSpace::Problem const &problem);
192
+
193
+ /// Measures performance results
194
+ virtual bool profile(
195
+ Options const &options,
196
+ PerformanceReport &report,
197
+ DeviceContext &device_context,
198
+ library::Operation const *operation,
199
+ ProblemSpace const &problem_space,
200
+ ProblemSpace::Problem const &problem);
201
+
202
+ protected:
203
+
204
+ /// Initializes the performance result
205
+ void initialize_result_(
206
+ PerformanceResult &result,
207
+ Options const &options,
208
+ library::RankKDescription const &operation_desc,
209
+ ProblemSpace const &problem_space);
210
+
211
+ /// Verifies CUTLASS against references
212
+ bool verify_with_cublas_(
213
+ Options const &options,
214
+ PerformanceReport &report,
215
+ DeviceContext &device_context,
216
+ library::Operation const *operation,
217
+ ProblemSpace const &problem_space,
218
+ ProblemSpace::Problem const &problem);
219
+
220
+ };
221
+
222
+ /////////////////////////////////////////////////////////////////////////////////////////////////
223
+
224
+ } // namespace profiler
225
+ } // namespace cutlass
226
+
227
+ /////////////////////////////////////////////////////////////////////////////////////////////////
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/sparse_gemm_operation_profiler.h ADDED
@@ -0,0 +1,214 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief
33
+
34
+ */
35
+
36
+ #pragma once
37
+
38
+ #include <vector>
39
+ #include <string>
40
+ #include <memory>
41
+ #include <algorithm>
42
+ #include <unordered_map>
43
+
44
+ // CUTLASS Library includes
45
+ #include "cutlass/library/library.h"
46
+ #include "cutlass/library/util.h"
47
+ #include "cutlass/library/manifest.h"
48
+
49
+ // Profiler includes
50
+ #include "options.h"
51
+ #include "device_context.h"
52
+ #include "operation_profiler.h"
53
+ #include "performance_result.h"
54
+ #include "problem_space.h"
55
+ #include "gemm_operation_profiler.h"
56
+
57
+ /////////////////////////////////////////////////////////////////////////////////////////////////
58
+
59
+ namespace cutlass {
60
+ namespace profiler {
61
+
62
+ /////////////////////////////////////////////////////////////////////////////////////////////////
63
+
64
+ /// Operation profiler for structured sparse GEMM operations
65
+ class SparseGemmOperationProfiler : public OperationProfiler {
66
+ public:
67
+
68
+ /// Problem structure obtained from problem space
69
+ struct SparseGemmProblem {
70
+ int64_t m;
71
+ int64_t n;
72
+ int64_t k;
73
+ int64_t lda;
74
+ int64_t ldb;
75
+ int64_t ldc;
76
+ int64_t lde;
77
+ std::vector<uint8_t> alpha;
78
+ std::vector<uint8_t> beta;
79
+ int64_t split_k_slices;
80
+ int64_t batch_count;
81
+ static int const sparse = 2;
82
+ // Every 128 bits of ElementA uses one ElementE
83
+ int elements_per_128b;
84
+
85
+ //
86
+ // Methods
87
+ //
88
+
89
+ SparseGemmProblem():
90
+ m(16), n(16), k(16), lda(0), ldb(0), ldc(0), lde(0), split_k_slices(1), batch_count(1) { }
91
+
92
+ /// Parses the problem
93
+ Status parse(
94
+ library::SparseGemmDescription const &operation_desc,
95
+ ProblemSpace const &problem_space,
96
+ ProblemSpace::Problem const &problem);
97
+
98
+ /// Initializes a performance result
99
+ void initialize_result(
100
+ PerformanceResult &result,
101
+ library::SparseGemmDescription const &operation_desc,
102
+ ProblemSpace const &problem_space);
103
+ };
104
+
105
+ /// Workspace used
106
+ struct SparseGemmWorkspace {
107
+
108
+ DeviceAllocation *A;
109
+ DeviceAllocation *B;
110
+ DeviceAllocation *C;
111
+ DeviceAllocation *E;
112
+ DeviceAllocation *Computed;
113
+ DeviceAllocation *Reference;
114
+
115
+ library::SparseGemmConfiguration configuration;
116
+ library::SparseGemmArguments arguments;
117
+
118
+ /// Buffer used for the operation's host workspace
119
+ std::vector<uint8_t> host_workspace;
120
+
121
+ /// Buffer used for the operations' device workspace
122
+ DeviceAllocation device_workspace;
123
+
124
+ //
125
+ // Methods
126
+ //
127
+
128
+ SparseGemmWorkspace():
129
+ A(nullptr), B(nullptr), C(nullptr), E(nullptr), Computed(nullptr), Reference(nullptr) { }
130
+ };
131
+
132
+ protected:
133
+
134
+ //
135
+ // Data members
136
+ //
137
+
138
+ // GEMM problem
139
+ SparseGemmProblem problem_;
140
+
141
+ /// Device memory allocations
142
+ SparseGemmWorkspace gemm_workspace_;
143
+
144
+
145
+ public:
146
+ //
147
+ // Methods
148
+ //
149
+
150
+ /// Ctor
151
+ SparseGemmOperationProfiler(Options const &options);
152
+
153
+ /// Destructor
154
+ virtual ~SparseGemmOperationProfiler();
155
+
156
+ /// Prints usage statement for the math function
157
+ virtual void print_usage(std::ostream &out) const;
158
+
159
+ /// Prints examples
160
+ virtual void print_examples(std::ostream &out) const;
161
+
162
+ /// Extracts the problem dimensions
163
+ virtual Status initialize_configuration(
164
+ Options const &options,
165
+ PerformanceReport &report,
166
+ DeviceContext &device_context,
167
+ library::Operation const *operation,
168
+ ProblemSpace const &problem_space,
169
+ ProblemSpace::Problem const &problem);
170
+
171
+ /// Initializes workspace
172
+ virtual Status initialize_workspace(
173
+ Options const &options,
174
+ PerformanceReport &report,
175
+ DeviceContext &device_context,
176
+ library::Operation const *operation,
177
+ ProblemSpace const &problem_space,
178
+ ProblemSpace::Problem const &problem);
179
+
180
+ /// Verifies CUTLASS against references
181
+ virtual bool verify_cutlass(
182
+ Options const &options,
183
+ PerformanceReport &report,
184
+ DeviceContext &device_context,
185
+ library::Operation const *operation,
186
+ ProblemSpace const &problem_space,
187
+ ProblemSpace::Problem const &problem);
188
+
189
+ /// Measures performance results
190
+ virtual bool profile(
191
+ Options const &options,
192
+ PerformanceReport &report,
193
+ DeviceContext &device_context,
194
+ library::Operation const *operation,
195
+ ProblemSpace const &problem_space,
196
+ ProblemSpace::Problem const &problem);
197
+
198
+ protected:
199
+
200
+ /// Initializes the performance result
201
+ void initialize_result_(
202
+ PerformanceResult &result,
203
+ Options const &options,
204
+ library::SparseGemmDescription const &operation_desc,
205
+ ProblemSpace const &problem_space);
206
+ };
207
+
208
+ /////////////////////////////////////////////////////////////////////////////////////////////////
209
+
210
+ } // namespace profiler
211
+ } // namespace cutlass
212
+
213
+ /////////////////////////////////////////////////////////////////////////////////////////////////
214
+
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/include/cutlass/profiler/trmm_operation_profiler.h ADDED
@@ -0,0 +1,222 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Defines a math function
33
+
34
+
35
+ */
36
+
37
+ #pragma once
38
+
39
+ #include <vector>
40
+ #include <string>
41
+ #include <memory>
42
+ #include <algorithm>
43
+ #include <unordered_map>
44
+
45
+ // CUTLASS Library includes
46
+ #include "cutlass/blas3.h"
47
+ #include "cutlass/library/library.h"
48
+ #include "cutlass/library/util.h"
49
+ #include "cutlass/library/manifest.h"
50
+
51
+ // Profiler includes
52
+ #include "options.h"
53
+ #include "device_context.h"
54
+ #include "operation_profiler.h"
55
+ #include "performance_result.h"
56
+ #include "problem_space.h"
57
+
58
+ /////////////////////////////////////////////////////////////////////////////////////////////////
59
+
60
+ namespace cutlass {
61
+ namespace profiler {
62
+
63
+ /////////////////////////////////////////////////////////////////////////////////////////////////
64
+
65
+ /// Operation profiler for TRMM operations
66
+ class TrmmOperationProfiler : public OperationProfiler {
67
+ public:
68
+
69
+ /// Problem structure obtained from problem space
70
+ struct TrmmProblem {
71
+ int64_t m;
72
+ int64_t n;
73
+ int64_t lda;
74
+ int64_t ldb;
75
+ int64_t ldd;
76
+ SideMode side_mode;
77
+ FillMode fill_mode;
78
+ DiagType diag_type;
79
+ std::vector<uint8_t> alpha;
80
+ std::vector<uint8_t> beta;
81
+ int64_t split_k_slices;
82
+ int64_t batch_count;
83
+
84
+ //
85
+ // Methods
86
+ //
87
+
88
+ TrmmProblem():
89
+ m(16), n(16), lda(0), ldb(0), ldd(0), split_k_slices(1), batch_count(1) { }
90
+
91
+ /// Parses the problem
92
+ Status parse(
93
+ library::TrmmDescription const &operation_desc,
94
+ ProblemSpace const &problem_space,
95
+ ProblemSpace::Problem const &problem);
96
+
97
+ /// Initializes a performance result
98
+ void initialize_result(
99
+ PerformanceResult &result,
100
+ library::TrmmDescription const &operation_desc,
101
+ ProblemSpace const &problem_space);
102
+ };
103
+
104
+ /// Workspace used
105
+ struct TrmmWorkspace {
106
+
107
+ DeviceAllocation *A;
108
+ DeviceAllocation *B;
109
+ DeviceAllocation *D;
110
+ DeviceAllocation *Computed;
111
+ DeviceAllocation *Reference;
112
+
113
+ library::TrmmConfiguration configuration;
114
+ library::TrmmArguments arguments;
115
+
116
+ /// Buffer used for the operation's host workspace
117
+ std::vector<uint8_t> host_workspace;
118
+
119
+ /// Buffer used for the operations' device workspace
120
+ DeviceAllocation device_workspace;
121
+
122
+ //
123
+ // Methods
124
+ //
125
+
126
+ TrmmWorkspace():
127
+ A(nullptr), B(nullptr), D(nullptr), Computed(nullptr), Reference(nullptr) { }
128
+ };
129
+
130
+ protected:
131
+
132
+ //
133
+ // Data members
134
+ //
135
+
136
+ /// GEMM problem obtained from problem space
137
+ TrmmProblem problem_;
138
+
139
+ /// Device memory allocations
140
+ TrmmWorkspace trmm_workspace_;
141
+
142
+
143
+ public:
144
+ //
145
+ // Methods
146
+ //
147
+
148
+ /// Ctor
149
+ TrmmOperationProfiler(Options const &options);
150
+
151
+ /// Destructor
152
+ virtual ~TrmmOperationProfiler();
153
+
154
+ /// Prints usage statement for the math function
155
+ virtual void print_usage(std::ostream &out) const;
156
+
157
+ /// Prints examples
158
+ virtual void print_examples(std::ostream &out) const;
159
+
160
+ /// Extracts the problem dimensions
161
+ virtual Status initialize_configuration(
162
+ Options const &options,
163
+ PerformanceReport &report,
164
+ DeviceContext &device_context,
165
+ library::Operation const *operation,
166
+ ProblemSpace const &problem_space,
167
+ ProblemSpace::Problem const &problem);
168
+
169
+ /// Initializes workspace
170
+ virtual Status initialize_workspace(
171
+ Options const &options,
172
+ PerformanceReport &report,
173
+ DeviceContext &device_context,
174
+ library::Operation const *operation,
175
+ ProblemSpace const &problem_space,
176
+ ProblemSpace::Problem const &problem);
177
+
178
+ /// Verifies CUTLASS against references
179
+ virtual bool verify_cutlass(
180
+ Options const &options,
181
+ PerformanceReport &report,
182
+ DeviceContext &device_context,
183
+ library::Operation const *operation,
184
+ ProblemSpace const &problem_space,
185
+ ProblemSpace::Problem const &problem);
186
+
187
+ /// Measures performance results
188
+ virtual bool profile(
189
+ Options const &options,
190
+ PerformanceReport &report,
191
+ DeviceContext &device_context,
192
+ library::Operation const *operation,
193
+ ProblemSpace const &problem_space,
194
+ ProblemSpace::Problem const &problem);
195
+
196
+ protected:
197
+
198
+ /// Initializes the performance result
199
+ void initialize_result_(
200
+ PerformanceResult &result,
201
+ Options const &options,
202
+ library::TrmmDescription const &operation_desc,
203
+ ProblemSpace const &problem_space);
204
+
205
+ /// Verifies CUTLASS against references
206
+ bool verify_with_cublas_(
207
+ Options const &options,
208
+ PerformanceReport &report,
209
+ DeviceContext &device_context,
210
+ library::Operation const *operation,
211
+ ProblemSpace const &problem_space,
212
+ ProblemSpace::Problem const &problem);
213
+
214
+ };
215
+
216
+ /////////////////////////////////////////////////////////////////////////////////////////////////
217
+
218
+ } // namespace profiler
219
+ } // namespace cutlass
220
+
221
+ /////////////////////////////////////////////////////////////////////////////////////////////////
222
+
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/conv2d_operation_profiler.cu ADDED
@@ -0,0 +1,1510 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Convolution 2D profiling
33
+ */
34
+
35
+ #include <iostream>
36
+ #include <stdexcept>
37
+ #include <iomanip>
38
+ #include <ios>
39
+
40
+ #include "cutlass/core_io.h"
41
+
42
+ #include "cutlass/profiler/conv2d_operation_profiler.h"
43
+ #include "cutlass/profiler/gpu_timer.h"
44
+ /////////////////////////////////////////////////////////////////////////////////////////////////
45
+ using namespace cutlass::library;
46
+
47
+ namespace cutlass {
48
+ namespace profiler {
49
+
50
+
51
+ /////////////////////////////////////////////////////////////////////////////////////////////////
52
+
53
+ /// Ctor
54
+ Conv2dOperationProfiler::Conv2dOperationProfiler(Options const &options):
55
+ OperationProfiler(
56
+ options,
57
+ library::OperationKind::kConv2d,
58
+ {
59
+ {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"},
60
+ {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv2d problem space"},
61
+ {ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv2d problem space"},
62
+ {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv2d problem space"},
63
+ {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv2d problem space"},
64
+ {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv2d problem space"},
65
+ {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv2d problem space"},
66
+ {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv2d problem space"},
67
+ {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv2d problem space"},
68
+ {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv2d problem space"},
69
+ {ArgumentTypeID::kInteger, {"g", "groups"}, "Number of convolution groups"},
70
+ {ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"},
71
+ {ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"},
72
+ {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"},
73
+ {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"},
74
+ {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"},
75
+ {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"},
76
+ {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"},
77
+ {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"},
78
+ {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"},
79
+ {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"},
80
+ {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"},
81
+ {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
82
+ {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
83
+ {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"},
84
+ {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
85
+ {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"},
86
+ },
87
+ { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN }
88
+ ) {
89
+
90
+ description_ = " Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D)";
91
+
92
+ }
93
+
94
+ /// Destructor
95
+ Conv2dOperationProfiler::~Conv2dOperationProfiler() {
96
+
97
+ }
98
+
99
+
100
+ /// Prints usage statement for the math function
101
+ void Conv2dOperationProfiler::print_usage(std::ostream &out) const {
102
+ out << "Conv2d" << "\n\n";
103
+
104
+ OperationProfiler::print_usage(out);
105
+ }
106
+
107
+ /// Prints examples
108
+ void Conv2dOperationProfiler::print_examples(std::ostream &out) const {
109
+
110
+ out << "\nExamples:\n\n"
111
+ << "Profile a particular convolution (specify all the convolution parameters):\n"
112
+ << " $ cutlass_profiler --operation=Conv2d"
113
+ " --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32"
114
+ " --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3"
115
+ " --pad_h=1 --pad_w=1"
116
+ " --stride_h=1 --stride_w=1"
117
+ " --dilation_h=1 --dilation_w=1\n\n";
118
+ }
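// [Editor's note, not part of the committed file] The flags in the example above map
// one-to-one onto the arguments registered in the constructor. A second, hypothetical
// invocation exercising parallel split-K reduction could look like:
//
//   $ cutlass_profiler --operation=Conv2d --conv_kind=fprop
//       --Activation=f16:nhwc --Filter=f16:nhwc --Output=f32
//       --n=8 --h=56 --w=56 --c=64 --k=256 --r=1 --s=1
//       --split_k_mode=parallel --split_k_slices=4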
119
+
120
+ #if 0
121
+ // used this for debugging
122
+ static std::string byte_string(std::vector<uint8_t> const &bytes) {
123
+ std::stringstream ss;
124
+
125
+ ss << "0x";
126
+
127
+ for (size_t idx = bytes.size(); idx > 0; --idx) {
128
+ ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
129
+ }
130
+
131
+ return ss.str();
132
+ }
133
+ #endif
134
+
135
+ /////////////////////////////////////////////////////////////////////////////////////////////////
136
+
137
+ /// Total number of bytes loaded
138
+ int64_t Conv2dOperationProfiler::Conv2dProblem::bytes(
139
+ library::ConvDescription const &operation_desc) const {
140
+
141
+ cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
142
+
143
+ // Input bytes read and Output bytes written for the gemm problem
144
+ int64_t bytes_ =
145
+ int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() +
146
+ int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() +
147
+ int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
148
+
149
+ // Set is_beta_zero true if beta is zero
150
+ bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
151
+
152
+ // Output bytes read for the gemm problem for non-zero beta values
153
+ if (!is_beta_zero) {
154
+ bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n();
155
+ }
156
+
157
+ return bytes_;
158
+ }
159
+
160
+ /// Total number of flops computed
161
+ int64_t Conv2dOperationProfiler::Conv2dProblem::flops(
162
+ library::ConvDescription const &operation_desc) const {
163
+
164
+ cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind);
165
+
166
+ int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2;
167
+ int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2;
168
+
169
+ // Adjust mainloop flop for dgrad strided
170
+ if (operation_desc.conv_kind == library::ConvKind::kDgrad) {
171
+ flops_mainloop_ = flops_mainloop_ / (stride_h * stride_w);
172
+ }
173
+ int64_t flops_total_ = flops_mainloop_ + flops_epilogue_;
174
+
175
+ //complex-valued support
176
+ switch (operation_desc.tile_description.math_instruction.math_operation) {
177
+ case library::MathOperationID::kMultiplyAddComplex:
178
+ flops_total_ *=4;
179
+ break;
180
+
181
+ default: break;
182
+ }
183
+
184
+ return flops_total_;
185
+ }
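// [Editor's note, not part of the committed file] eq_gemm_size() is defined elsewhere;
// for fprop it is assumed here to follow the usual implicit-GEMM mapping
// M = N*P*Q, N = K, K_gemm = R*S*C. A minimal sketch of the count for the profiler's
// default problem (n=1, p=q=16, k=64, r=s=3, c=64):
//
//   int64_t m = 1 * 16 * 16;                 // 256 output pixels
//   int64_t n = 64;                          // output channels K
//   int64_t k = 3 * 3 * 64;                  // 576 reduction elements per output
//   int64_t flops_mainloop = 2 * m * n * k;  // 18,874,368 (multiply + add per MAC)
//   int64_t flops_epilogue = 2 * m * n;      // 32,768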
186
+
187
+ /////////////////////////////////////////////////////////////////////////////////////////////////
188
+
189
+ /// Extracts the problem dimensions
190
+ Status Conv2dOperationProfiler::initialize_configuration(
191
+ Options const &options,
192
+ PerformanceReport &report,
193
+ DeviceContext &device_context,
194
+ library::Operation const *operation,
195
+ ProblemSpace const &problem_space,
196
+ ProblemSpace::Problem const &problem) {
197
+
198
+ library::ConvDescription const &operation_desc =
199
+ static_cast<library::ConvDescription const &>(operation->description());
200
+
201
+ if (!arg_as_int(problem_.n, "n", problem_space, problem)) {
202
+ // default value
203
+ problem_.n = 1;
204
+ }
205
+
206
+ if (!arg_as_int(problem_.h, "h", problem_space, problem)) {
207
+ // default value
208
+ problem_.h = 16;
209
+ }
210
+
211
+ if (!arg_as_int(problem_.w, "w", problem_space, problem)) {
212
+ // default value
213
+ problem_.w = 16;
214
+ }
215
+
216
+ if (!arg_as_int(problem_.c, "c", problem_space, problem)) {
217
+ // default value
218
+ problem_.c = 64;
219
+ }
220
+
221
+ if (!arg_as_int(problem_.k, "k", problem_space, problem)) {
222
+ // default value
223
+ problem_.k = 64;
224
+ }
225
+
226
+ if (!arg_as_int(problem_.r, "r", problem_space, problem)) {
227
+ // default value
228
+ problem_.r = 3;
229
+ }
230
+
231
+ if (!arg_as_int(problem_.s, "s", problem_space, problem)) {
232
+ // default value
233
+ problem_.s = 3;
234
+ }
235
+
236
+ if (!arg_as_int(problem_.groups, "g", problem_space, problem)) {
237
+ // default value
238
+ problem_.groups = 1;
239
+ }
240
+
241
+ if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) {
242
+ // default value
243
+ problem_.pad_h = 1;
244
+ }
245
+
246
+ if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) {
247
+ // default value
248
+ problem_.pad_w = 1;
249
+ }
250
+
251
+ if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) {
252
+ // default value
253
+ problem_.stride_h = 1;
254
+ }
255
+
256
+ if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) {
257
+ // default value
258
+ problem_.stride_w = 1;
259
+ }
260
+
261
+ if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) {
262
+ // default value
263
+ problem_.dilation_h = 1;
264
+ }
265
+
266
+ if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) {
267
+ // default value
268
+ problem_.dilation_w = 1;
269
+ }
270
+
271
+ //////////////////////// Convolution output dimensions p and q ////////////////////////
272
+ // Cutlass convolutions support arbitrary output sizes and are not constrained by //
273
+ // input, filter, padding, striding, dilation sizes. //
274
+ // cuDNN sets the output dimensions (p, q) using the following equation: //
275
+ // //
276
+ // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
277
+ // where; div_up(a, b) : (a - 1)/b + 1 //
278
+ // //
279
+ // Thus, when output p and q dimensions are unspecified by the user //
280
+ // cutlass profiler sets p and q which are cuDNN compliant. //
281
+ // //
282
+ ////////////////////////////////////////////////////////////////////////////////////////
283
+ // set convolution output p
284
+ if (!arg_as_int(problem_.p, "p", problem_space, problem)) {
285
+ // default value (set using cudnn formula for output height, when p is not provided)
286
+ problem_.p = (
287
+ problem_.h +
288
+ 2 * problem_.pad_h -
289
+ ((problem_.r - 1) * problem_.dilation_h + 1)
290
+ ) / (problem_.stride_h)
291
+ + 1;
292
+ }
293
+
294
+ // set convolution output q
295
+ if (!arg_as_int(problem_.q, "q", problem_space, problem)) {
296
+ // default value (set using cudnn formula for output width, when q is not provided)
297
+ problem_.q = (
298
+ problem_.w +
299
+ 2 * problem_.pad_w -
300
+ ((problem_.s - 1) * problem_.dilation_w + 1)
301
+ ) / (problem_.stride_w)
302
+ + 1;
303
+ }
304
+ /////////////////////////////////////////////////////////////////////////////////////////
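// [Editor's note, not part of the committed file] Worked example of the default output
// size: with h=16, pad_h=1, r=3, dilation_h=1, stride_h=1 the expression above gives
//
//   p = (16 + 2*1 - ((3 - 1)*1 + 1)) / 1 + 1 = 15 + 1 = 16
//
// i.e. "same" padding for a 3x3 filter at unit stride; q follows identically from w.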
305
+
306
+
307
+ if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) {
308
+ // default value
309
+ problem_.split_k_mode = library::SplitKMode::kSerial;
310
+ }
311
+
312
+ if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) {
313
+ // default value
314
+ problem_.split_k_slices = 1;
315
+ }
316
+
317
+ if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) {
318
+ // default value
319
+ problem_.conv_mode = library::ConvModeID::kCrossCorrelation;
320
+ }
321
+
322
+ if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) {
323
+ // default value
324
+ problem_.eq_gemm_provider = library::Provider::kNone;
325
+ }
326
+
327
+ if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) {
328
+ return Status::kErrorInvalidProblem;
329
+ }
330
+
331
+ if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) {
332
+ return Status::kErrorInvalidProblem;
333
+ }
334
+
335
+ if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) {
336
+ return Status::kErrorInvalidProblem;
337
+ }
338
+
339
+ if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) {
340
+ return Status::kErrorInvalidProblem;
341
+ }
342
+
343
+ if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) {
344
+ return Status::kErrorInvalidProblem;
345
+ }
346
+
347
+ if (!arg_as_scalar(
348
+ problem_.alpha,
349
+ operation_desc.element_epilogue,
350
+ "alpha",
351
+ problem_space,
352
+ problem)) {
353
+
354
+ if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) {
355
+ return Status::kErrorInternal;
356
+ }
357
+ }
358
+
359
+ if (!arg_as_scalar(
360
+ problem_.beta,
361
+ operation_desc.element_epilogue,
362
+ "beta",
363
+ problem_space,
364
+ problem)) {
365
+
366
+ if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) {
367
+ return Status::kErrorInternal;
368
+ }
369
+ }
370
+
371
+ // initialize library::Conv2dConfiguration
372
+ conv_workspace_.configuration.problem_size = conv::Conv2dProblemSize(
373
+ int(problem_.n),
374
+ int(problem_.h),
375
+ int(problem_.w),
376
+ int(problem_.c),
377
+ int(problem_.k),
378
+ int(problem_.r),
379
+ int(problem_.s),
380
+ int(problem_.p),
381
+ int(problem_.q),
382
+ int(problem_.pad_h),
383
+ int(problem_.pad_w),
384
+ int(problem_.stride_h),
385
+ int(problem_.stride_w),
386
+ int(problem_.dilation_h),
387
+ int(problem_.dilation_w),
388
+ static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)),
389
+ int(problem_.split_k_slices),
390
+ int(problem_.groups)
391
+ );
392
+
393
+ conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode));
394
+
395
+ conv_workspace_.set_stride_vector(
396
+ problem_, operation_desc.conv_kind, operation_desc.A.layout,
397
+ operation_desc.B.layout, operation_desc.C.layout);
398
+
399
+ // initialize library::ConvArguments
400
+ conv_workspace_.arguments.A = nullptr;
401
+ conv_workspace_.arguments.B = nullptr;
402
+ conv_workspace_.arguments.C = nullptr;
403
+ conv_workspace_.arguments.D = nullptr;
404
+ conv_workspace_.arguments.alpha = problem_.alpha.data();
405
+ conv_workspace_.arguments.beta = problem_.beta.data();
406
+ conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
407
+
408
+ // initialize reduction operation for parallel splitKMode
409
+ if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
410
+ if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) {
411
+ return Status::kErrorInternal;
412
+ }
413
+ }
414
+
415
+ initialize_result_(this->model_result_, options, operation_desc, problem_space);
416
+
417
+ return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments);
418
+ }
419
+
420
+ /// Initializes the performance result
421
+ void Conv2dOperationProfiler::initialize_result_(
422
+ PerformanceResult &result,
423
+ Options const &options,
424
+ library::ConvDescription const &operation_desc,
425
+ ProblemSpace const &problem_space) {
426
+
427
+ result.provider = library::Provider::kCUTLASS;
428
+ result.disposition = Disposition::kNotRun;
429
+ result.status = Status::kSuccess;
430
+ result.operation_name = operation_desc.name;
431
+
432
+ result.arguments.resize(problem_space.rank());
433
+
434
+ set_argument(result, "Activation", problem_space,
435
+ std::string(library::to_string(operation_desc.activation().element))
436
+ + ":" + library::to_string(operation_desc.activation().layout));
437
+
438
+ set_argument(result, "Filter", problem_space,
439
+ std::string(library::to_string(operation_desc.filter().element))
440
+ + ":" + library::to_string(operation_desc.filter().layout));
441
+
442
+ set_argument(result, "Output", problem_space,
443
+ std::string(library::to_string(operation_desc.output().element))
444
+ + ":" + library::to_string(operation_desc.output().layout));
445
+
446
+ set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind));
447
+
448
+ set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm)));
449
+
450
+ set_argument(result, "n", problem_space, problem_.n);
451
+ set_argument(result, "h", problem_space, problem_.h);
452
+ set_argument(result, "w", problem_space, problem_.w);
453
+ set_argument(result, "c", problem_space, problem_.c);
454
+
455
+ set_argument(result, "k", problem_space, problem_.k);
456
+ set_argument(result, "r", problem_space, problem_.r);
457
+ set_argument(result, "s", problem_space, problem_.s);
458
+
459
+ set_argument(result, "p", problem_space, problem_.p);
460
+ set_argument(result, "q", problem_space, problem_.q);
461
+
462
+ set_argument(result, "g", problem_space, problem_.groups);
463
+
464
+ set_argument(result, "pad_h", problem_space, problem_.pad_h);
465
+ set_argument(result, "pad_w", problem_space, problem_.pad_w);
466
+
467
+ set_argument(result, "stride_h", problem_space, problem_.stride_h);
468
+ set_argument(result, "stride_w", problem_space, problem_.stride_w);
469
+
470
+ set_argument(result, "dilation_h", problem_space, problem_.dilation_h);
471
+ set_argument(result, "dilation_w", problem_space, problem_.dilation_w);
472
+
473
+ set_argument(result, "split_k_mode", problem_space,
474
+ std::string(library::to_string(problem_.split_k_mode)));
475
+ set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices);
476
+
477
+ set_argument(result, "conv_mode", problem_space,
478
+ std::string(library::to_string(problem_.conv_mode)));
479
+
480
+ set_argument(result, "alpha", problem_space,
481
+ library::lexical_cast(problem_.alpha, operation_desc.element_epilogue));
482
+
483
+ set_argument(result, "beta", problem_space,
484
+ library::lexical_cast(problem_.beta, operation_desc.element_epilogue));
485
+
486
+ set_argument(result, "eq_gemm_provider", problem_space,
487
+ std::string(library::to_string(problem_.eq_gemm_provider)));
488
+
489
+ OperationProfiler::initialize_result_(result, operation_desc, problem_space);
490
+
491
+ // Bytes of activation, filter, and output tensors
492
+ int64_t activation_bytes = int64_t(library::sizeof_bits(operation_desc.activation().element) / 8) *
493
+ conv_workspace_.configuration.problem_size.activation_size();
494
+
495
+ int64_t filter_bytes = int64_t(library::sizeof_bits(operation_desc.filter().element) / 8) *
496
+ conv_workspace_.configuration.problem_size.filter_size();
497
+
498
+ int64_t output_bytes = int64_t(library::sizeof_bits(operation_desc.output().element) / 8) *
499
+ conv_workspace_.configuration.problem_size.output_size();
500
+
501
+ // Bytes of activation, filter, and output tensors
502
+ result.bytes = problem_.bytes(operation_desc);
503
+
504
+ // Theoretical flops required for the computation
505
+ result.flops = problem_.flops(operation_desc);
506
+
507
+ // Measured runtime
508
+ result.runtime = 0;
509
+
510
+ }
511
+
512
+ /// Initialize reduction problem dimensions and library::Operation
513
+ bool Conv2dOperationProfiler::initialize_reduction_configuration_(
514
+ Options const &options,
515
+ PerformanceReport &report,
516
+ DeviceContext &device_context,
517
+ library::Operation const *operation,
518
+ ProblemSpace const &problem_space,
519
+ ProblemSpace::Problem const &problem) {
520
+
521
+ library::ConvDescription const &conv_desc =
522
+ static_cast<library::ConvDescription const &>(operation->description());
523
+
524
+ library::ConvKind const &conv_kind = conv_desc.conv_kind;
525
+
526
+ if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) {
527
+ return false;
528
+ }
529
+
530
+ if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) {
531
+ return false;
532
+ }
533
+
534
+ /// This chooses the appropriate stride element of the row-major C tensor.
535
+ int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 2 : 0);
536
+
537
+ /// initialize library::ReductionConfiguration
538
+ conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn();
539
+ conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices);
540
+ conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product();
541
+ conv_workspace_.reduction_configuration.ldw =
542
+ conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
543
+ conv_workspace_.reduction_configuration.lds =
544
+ conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
545
+ conv_workspace_.reduction_configuration.ldd =
546
+ conv_workspace_.configuration.stride_c[tensor_c_stride_idx];
547
+
548
+ // find reduction operation
549
+ library::ReductionFunctionalKey reduction_key(
550
+ library::Provider::kCUTLASS,
551
+ conv_desc.tile_description.math_instruction.element_accumulator, // element workspace
552
+ conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator
553
+ conv_desc.C.element, // element output
554
+ conv_desc.element_epilogue // element compute
555
+ );
556
+
557
+ #if 0 // debug print to check which reduction instance is selected
558
+ std::cout << reduction_key << "\n";
559
+ #endif
560
+ auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key);
561
+
562
+ if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) {
563
+
564
+ return false;
565
+ }
566
+
567
+ // initialize reduction operation required for parallel split-k conv2d operator
568
+ reduction_op_ = reduction_it->second;
569
+
570
+ // reduction operation found and initialized
571
+ return true;
572
+ }
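// [Editor's note, not part of the committed file] With the settings above, the parallel
// split-K workspace is laid out as `partitions` consecutive M x N accumulator tiles of
// the equivalent GEMM, so its element count is roughly
//
//   int64_t workspace_elements =
//       int64_t(conv_workspace_.reduction_configuration.partitions) *
//       conv_workspace_.reduction_configuration.partition_stride;
//
// The authoritative byte count still comes from get_device_workspace_size() when the
// workspace is allocated in initialize_workspace().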
573
+
574
+
575
+ /// Initializes workspace
576
+ Status Conv2dOperationProfiler::initialize_workspace(
577
+ Options const &options,
578
+ PerformanceReport &report,
579
+ DeviceContext &device_context,
580
+ library::Operation const *operation,
581
+ ProblemSpace const &problem_space,
582
+ ProblemSpace::Problem const &problem) {
583
+
584
+ if (options.device.devices.size() != 1) {
585
+ throw std::runtime_error("This operation profiler only supports a single "
586
+ "device.");
587
+ }
588
+
589
+ cudaError_t result;
590
+ result = cudaSetDevice(options.device.device_id(0));
591
+ if (result != cudaSuccess) {
592
+ throw std::runtime_error("cudaSetDevice() failed.");
593
+ }
594
+
595
+ // initialize conv2d underlying operation to handle parallel reduction
596
+ library::Operation const* underlying_operation = operation;
597
+
598
+ if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
599
+ if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
600
+ return Status::kErrorNotSupported;
601
+ }
602
+ }
603
+
604
+ library::ConvDescription const &operation_desc =
605
+ static_cast<library::ConvDescription const &>(underlying_operation->description());
606
+
607
+ // Compute the number of copies of the problem to avoid L2 camping.
608
+ if (!options.profiling.workspace_count) {
609
+ int64_t bytes = problem_.bytes(operation_desc);
610
+ if (bytes < 3 * int64_t(options.device.properties[0].l2CacheSize)) {
611
+ conv_workspace_.problem_count =
612
+ 1 + int((3 * int64_t(options.device.properties[0].l2CacheSize)) / bytes);
613
+ }
614
+ else {
615
+ conv_workspace_.problem_count = 1;
616
+ }
617
+ }
618
+ else {
619
+ conv_workspace_.problem_count = options.profiling.workspace_count;
620
+ }
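// [Editor's note, not part of the committed file] Worked example of the rotation count:
// for a hypothetical device with a 40 MiB L2 and a problem touching 8 MiB,
//
//   problem_count = 1 + (3 * 40 MiB) / 8 MiB = 16
//
// so sixteen copies of the tensors are cycled through, keeping successive profiling
// iterations from reusing data already resident in L2.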
621
+
622
+
623
+ if (options.execution_mode != ExecutionMode::kDryRun) {
624
+ int seed_shift = 0;
625
+ conv_workspace_.A = device_context.allocate_and_initialize_tensor(
626
+ options,
627
+ "A",
628
+ operation_desc.A.element,
629
+ operation_desc.A.layout,
630
+ problem_.extent_a(operation_desc.conv_kind),
631
+ conv_workspace_.configuration.stride_a,
632
+ conv_workspace_.problem_count,
633
+ seed_shift++,
634
+ 0 // device_index
635
+ );
636
+
637
+ conv_workspace_.B = device_context.allocate_and_initialize_tensor(
638
+ options,
639
+ "B",
640
+ operation_desc.B.element,
641
+ operation_desc.B.layout,
642
+ problem_.extent_b(operation_desc.conv_kind),
643
+ conv_workspace_.configuration.stride_b,
644
+ conv_workspace_.problem_count,
645
+ seed_shift++,
646
+ 0 // device_index
647
+ );
648
+
649
+ if(problem_.groups == problem_.c && problem_.groups == problem_.k){
650
+ // Depthwise direct conv kernel needs to reorder the filter.
651
+ conv_workspace_.reordered_B = device_context.allocate_and_initialize_tensor(
652
+ options,
653
+ "B",
654
+ operation_desc.B.element,
655
+ operation_desc.B.layout,
656
+ problem_.extent_b(operation_desc.conv_kind),
657
+ conv_workspace_.configuration.stride_b,
658
+ conv_workspace_.problem_count,
659
+ seed_shift++,
660
+ 0 // device_index
661
+ );
662
+ }
663
+
664
+ conv_workspace_.C = device_context.allocate_and_initialize_tensor(
665
+ options,
666
+ "C",
667
+ operation_desc.C.element,
668
+ operation_desc.C.layout,
669
+ problem_.extent_c(operation_desc.conv_kind),
670
+ conv_workspace_.configuration.stride_c,
671
+ conv_workspace_.problem_count,
672
+ seed_shift++,
673
+ 0 // device_index
674
+ );
675
+
676
+ conv_workspace_.Computed = device_context.allocate_tensor(
677
+ options,
678
+ "D",
679
+ operation_desc.C.element,
680
+ operation_desc.C.layout,
681
+ problem_.extent_c(operation_desc.conv_kind),
682
+ conv_workspace_.configuration.stride_c,
683
+ conv_workspace_.problem_count,
684
+ 0 // device_index
685
+ );
686
+
687
+ conv_workspace_.Reference = device_context.allocate_tensor(
688
+ options,
689
+ "Reference",
690
+ operation_desc.C.element,
691
+ operation_desc.C.layout,
692
+ problem_.extent_c(operation_desc.conv_kind),
693
+ conv_workspace_.configuration.stride_c,
694
+ conv_workspace_.problem_count,
695
+ 0 // device_index
696
+ );
697
+ }
698
+
699
+ //
700
+ // Initialize the CUTLASS operation
701
+ //
702
+ Status status = Status::kSuccess;
703
+
704
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
705
+
706
+ if (options.execution_mode != ExecutionMode::kDryRun) {
707
+
708
+ uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration);
709
+ conv_workspace_.host_workspace.resize(workspace_size, 0);
710
+
711
+ workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration);
712
+ conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
713
+
714
+ status = underlying_operation->initialize(
715
+ &conv_workspace_.configuration,
716
+ conv_workspace_.host_workspace.data(),
717
+ conv_workspace_.device_workspace.data());
718
+
719
+ if (status != Status::kSuccess) {
720
+ return status;
721
+ }
722
+
723
+ if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
724
+ workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration);
725
+ conv_workspace_.reduction_host_workspace.resize(workspace_size, 0);
726
+
727
+ status = reduction_op_->initialize(
728
+ &conv_workspace_.reduction_configuration,
729
+ conv_workspace_.reduction_host_workspace.data(),
730
+ nullptr);
731
+
732
+ if (status != Status::kSuccess) {
733
+ return status;
734
+ }
735
+ }
736
+ }
737
+
738
+ //
739
+ // If CUTLASS is enabled, generate a result for it
740
+ //
741
+ results_.push_back(model_result_);
742
+ results_.back().provider = library::Provider::kCUTLASS;
743
+ results_.back().op_kind = library::OperationKind::kConv2d;
744
+ results_.back().disposition = Disposition::kNotRun;
745
+
746
+ for(auto provider : verification_providers_) {
747
+ results_.back().verification_map[provider] = Disposition::kNotRun;
748
+ }
749
+ }
750
+
751
+ return status;
752
+ }
753
+
754
+ /////////////////////////////////////////////////////////////////////////////////////////////////
755
+
756
+ /// Verifies CUTLASS against references
757
+ bool Conv2dOperationProfiler::verify_cutlass(
758
+ Options const &options,
759
+ PerformanceReport &report,
760
+ DeviceContext &device_context,
761
+ library::Operation const *operation,
762
+ ProblemSpace const &problem_space,
763
+ ProblemSpace::Problem const &problem) {
764
+
765
+ if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
766
+ return true;
767
+ }
768
+
769
+ if (options.execution_mode == ExecutionMode::kDryRun) {
770
+ return true;
771
+ }
772
+
773
+ cudaError_t result;
774
+
775
+ // Initialize structure containing Conv2d arguments
776
+ conv_workspace_.arguments.A = conv_workspace_.A->data();
777
+ conv_workspace_.arguments.B = conv_workspace_.B->data();
778
+ conv_workspace_.arguments.C = conv_workspace_.C->data();
779
+ conv_workspace_.arguments.D = conv_workspace_.Computed->data();
780
+ conv_workspace_.arguments.alpha = problem_.alpha.data();
781
+ conv_workspace_.arguments.beta = problem_.beta.data();
782
+ conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
783
+
784
+ if (conv_workspace_.reordered_B != nullptr){
785
+ conv_workspace_.arguments.reordered_B = conv_workspace_.reordered_B->data();
786
+ }else{
787
+ conv_workspace_.arguments.reordered_B = nullptr;
788
+ }
789
+
790
+ conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data());
791
+
792
+ if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
793
+ // update library::ConvArguments for parallel split-k reduction
794
+ conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
795
+ conv_workspace_.arguments.alpha = problem_.alpha_one.data();
796
+ conv_workspace_.arguments.beta = problem_.beta_zero.data();
797
+
798
+ /// initialize library::ReductionArguments
799
+ conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
800
+ conv_workspace_.reduction_arguments.source = conv_workspace_.C->data();
801
+ conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data();
802
+ conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
803
+ conv_workspace_.reduction_arguments.beta = problem_.beta.data();
804
+ conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
805
+ }
806
+
807
+ //
808
+ // Run the CUTLASS operation
809
+ //
810
+ // initialize conv2d underlying operation to handle parallel reduction
811
+ library::Operation const* underlying_operation = operation;
812
+
813
+ if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
814
+ if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
815
+ results_.back().disposition = Disposition::kFailed;
816
+ return false;
817
+ }
818
+ }
819
+
820
+ #if 0
821
+ std::cout << "profiling : " << std::endl
822
+ << "conv2d : " << operation->description().name << std::endl
823
+ << "underlying conv2d : " << underlying_operation->description().name << std::endl
824
+ << "reduction : " << reduction_op_->description().name << std::endl;
825
+ #endif
826
+
827
+ // run cutlass conv2d operation
828
+ results_.back().status = underlying_operation->run(
829
+ &conv_workspace_.arguments,
830
+ conv_workspace_.host_workspace.data(),
831
+ conv_workspace_.device_workspace.data());
832
+
833
+ if (results_.back().status != Status::kSuccess) {
834
+ results_.back().disposition = Disposition::kFailed;
835
+ return false;
836
+ }
837
+
838
+ // Run parallel reduction kernel for parallel split_k_mode
839
+ if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
840
+
841
+ results_.back().status = reduction_op_->run(
842
+ &conv_workspace_.reduction_arguments,
843
+ conv_workspace_.reduction_host_workspace.data(),
844
+ nullptr);
845
+
846
+ if (results_.back().status != Status::kSuccess) {
847
+ results_.back().disposition = Disposition::kFailed;
848
+ return false;
849
+ }
850
+
851
+ }
852
+
853
+ // Synchronize before running device reference
854
+ result = cudaDeviceSynchronize();
855
+ if (result != cudaSuccess) {
856
+ results_.back().disposition = Disposition::kFailed;
857
+ return false;
858
+ }
859
+
860
+ // CUTLASS op ran but has not yet been verified against any verification provider
861
+ results_.back().disposition = Disposition::kNotVerified;
862
+
863
+ //
864
+ // Run verification providers
865
+ //
866
+
867
+ if (options.verification.enabled) {
868
+
869
+ #if CUTLASS_ENABLE_CUDNN
870
+ // Run verification cudnn reference
871
+ if (options.verification.provider_enabled(library::Provider::kCUDNN)) {
872
+
873
+ // Guard against unsupported cases
874
+ auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description());
875
+
876
+ Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration);
877
+
878
+ // Initialize reference data to the source data
879
+ conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
880
+
881
+ if (status == Status::kSuccess) {
882
+ // call cudnn verification if supported
883
+ verify_with_cudnn_(
884
+ options,
885
+ report,
886
+ device_context,
887
+ operation,
888
+ problem_space,
889
+ problem);
890
+ }
891
+
892
+ else if (status == Status::kErrorInvalidProblem) {
893
+ results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem;
894
+ }
895
+
896
+ else {
897
+ // set verification map for cudnn to not supported
898
+ results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
899
+ }
900
+ }
901
+ #endif // #if CUTLASS_ENABLE_CUDNN
902
+
903
+ // Run verification device reference
904
+ if (options.verification.provider_enabled(library::Provider::kReferenceDevice)) {
905
+
906
+ // Restore reference data back to initial source data
907
+ conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
908
+
909
+ verify_with_device_reference_(
910
+ options,
911
+ report,
912
+ device_context,
913
+ operation,
914
+ problem_space,
915
+ problem);
916
+ }
917
+
918
+ // Run verification host reference
919
+ if (options.verification.provider_enabled(library::Provider::kReferenceHost)) {
920
+
921
+ // Restore reference data back to initial source data
922
+ conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
923
+
924
+ verify_with_host_reference_(
925
+ options,
926
+ report,
927
+ device_context,
928
+ operation,
929
+ problem_space,
930
+ problem);
931
+ }
932
+
933
+ // Update disposition to worst case verification outcome among all
934
+ // verification providers which are supported
935
+ bool is_any_verification_run_passed = false;
936
+ for(auto &m : results_.back().verification_map) {
937
+ if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
938
+ results_.back().disposition = m.second;
939
+ return true;
940
+ }
941
+ if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
942
+ is_any_verification_run_passed = true;
943
+ }
944
+ }
945
+
946
+ if(is_any_verification_run_passed) {
947
+ results_.back().disposition = Disposition::kPassed;
948
+ }
949
+ }
950
+
951
+ // Return true means continue profiling
952
+ return true;
953
+ }
954
+
955
+
956
+ /// Verifies CUTLASS against host reference
957
+ bool Conv2dOperationProfiler::verify_with_host_reference_(
958
+ Options const &options,
959
+ PerformanceReport &report,
960
+ DeviceContext &device_context,
961
+ library::Operation const *operation,
962
+ ProblemSpace const &problem_space,
963
+ ProblemSpace::Problem const &problem) {
964
+
965
+ Status status;
966
+
967
+ //
968
+ // Find host reference operation using conv2d functional description key
969
+ //
970
+ library::OperationDescription const &desc = operation->description();
971
+
972
+ auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
973
+
974
+ library::ConvFunctionalKey conv2d_key(
975
+ library::Provider::kReferenceHost,
976
+ conv_desc.conv_kind,
977
+ conv_desc.A.element,
978
+ conv_desc.A.layout,
979
+ conv_desc.B.element,
980
+ conv_desc.B.layout,
981
+ conv_desc.C.element,
982
+ conv_desc.C.layout,
983
+ conv_desc.tile_description.math_instruction.element_accumulator,
984
+ conv_desc.element_epilogue);
985
+
986
+ #if 0 // debug print to check which host reference instance is selected
987
+ std::cout << conv2d_key << "\n";
988
+ #endif
989
+
990
+ auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key);
991
+
992
+ if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) {
993
+
994
+ results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
995
+ return true;
996
+ }
997
+
998
+ // conv2d host reference minimum cc is 0 (CPU) and no iterator algorithm
999
+ library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone);
1000
+ auto cc_it = operators_it->second.find(preference_key);
1001
+
1002
+ if(cc_it == operators_it->second.end()) {
1003
+ results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun;
1004
+ return true;
1005
+ }
1006
+
1007
+ // host reference has only one instance in Conv2dOperationVectorMap
1008
+ library::Operation const *reference_op = cc_it->second[0];
1009
+
1010
+ //
1011
+ // Copy input tensors A, B, and C from device to host buffers
1012
+ //
1013
+ conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes());
1014
+ conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes());
1015
+ conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes());
1016
+
1017
+ conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data());
1018
+ conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data());
1019
+ conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data());
1020
+
1021
+ //
1022
+ // Initialize structure containing Conv2d arguments
1023
+ //
1024
+ conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data();
1025
+ conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data();
1026
+ conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data();
1027
+ conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data();
1028
+
1029
+ conv_workspace_.arguments.alpha = problem_.alpha.data();
1030
+ conv_workspace_.arguments.beta = problem_.beta.data();
1031
+ conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
1032
+
1033
+ //
1034
+ // Initialize host reference operation
1035
+ //
1036
+ std::vector<uint8_t> host_workspace_reference_op;
1037
+
1038
+ uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
1039
+ host_workspace_reference_op.resize(workspace_size, 0);
1040
+
1041
+ reference_op->initialize(
1042
+ &conv_workspace_.configuration,
1043
+ host_workspace_reference_op.data());
1044
+
1045
+ //
1046
+ // Run host reference operation
1047
+ //
1048
+ status = reference_op->run(
1049
+ &conv_workspace_.arguments,
1050
+ host_workspace_reference_op.data());
1051
+
1052
+ // Handle errors
1053
+ if (status != Status::kSuccess) {
1054
+ results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified;
1055
+ return true;
1056
+ }
1057
+
1058
+ //
1059
+ // Copy host reference output to device memory for equality check on device
1060
+ //
1061
+ conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D);
1062
+
1063
+ //
1064
+ // Verify results
1065
+ //
1066
+ results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors(
1067
+ options,
1068
+ *conv_workspace_.Computed,
1069
+ *conv_workspace_.Reference,
1070
+ conv_workspace_.Computed->batch_stride()
1071
+ );
1072
+
1073
+ // Save workspace if incorrect
1074
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
1075
+ results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) {
1076
+
1077
+ save_workspace(
1078
+ device_context,
1079
+ options,
1080
+ static_cast<library::ConvDescription const &>(operation->description()),
1081
+ library::Provider::kCUTLASS,
1082
+ library::Provider::kReferenceHost);
1083
+ }
1084
+
1085
+ // Return true means continue profiling
1086
+ return true;
1087
+ }
1088
+
1089
+
1090
+ /// Verifies CUTLASS against host reference
1091
+ bool Conv2dOperationProfiler::verify_with_device_reference_(
1092
+ Options const &options,
1093
+ PerformanceReport &report,
1094
+ DeviceContext &device_context,
1095
+ library::Operation const *operation,
1096
+ ProblemSpace const &problem_space,
1097
+ ProblemSpace::Problem const &problem) {
1098
+
1099
+ Status status;
1100
+
1101
+ //
1102
+ // Find device reference operation using conv2d functional description key
1103
+ //
1104
+ library::OperationDescription const &desc = operation->description();
1105
+
1106
+ auto &conv_desc = static_cast<library::ConvDescription const &>(desc);
1107
+
1108
+ library::ConvFunctionalKey conv2d_key(
1109
+ library::Provider::kReferenceDevice,
1110
+ conv_desc.conv_kind,
1111
+ conv_desc.A.element,
1112
+ conv_desc.A.layout,
1113
+ conv_desc.B.element,
1114
+ conv_desc.B.layout,
1115
+ conv_desc.C.element,
1116
+ conv_desc.C.layout,
1117
+ conv_desc.tile_description.math_instruction.element_accumulator,
1118
+ conv_desc.element_epilogue);
1119
+
1120
+ auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key);
1121
+
1122
+ if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) {
1123
+
1124
+ results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun;
1125
+
1126
+ return true;
1127
+ }
1128
+
1129
+ // conv2d device reference minimum cc is 50 and no iterator algorithm
1130
+ library::ConvPreferenceKey preference_key(50, library::IteratorAlgorithmID::kNone);
1131
+ auto cc_it = operators_it->second.find(preference_key);
1132
+
1133
+ if(cc_it == operators_it->second.end()) {
1134
+ results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun;
1135
+
1136
+ return true;
1137
+ }
1138
+
1139
+ // device reference has only one instance in Conv2dOperationVectorMap
1140
+ library::Operation const *reference_op = cc_it->second[0];
1141
+
1142
+ //
1143
+ // Initialize device reference operation
1144
+ //
1145
+ std::vector<uint8_t> host_workspace_reference_op;
1146
+
1147
+ uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration);
1148
+ host_workspace_reference_op.resize(workspace_size, 0);
1149
+
1150
+ reference_op->initialize(
1151
+ &conv_workspace_.configuration,
1152
+ host_workspace_reference_op.data());
1153
+
1154
+ // Initialize structure containing Conv2d arguments
1155
+ conv_workspace_.arguments.A = conv_workspace_.A->data();
1156
+ conv_workspace_.arguments.B = conv_workspace_.B->data();
1157
+ conv_workspace_.arguments.C = conv_workspace_.C->data();
1158
+ conv_workspace_.arguments.D = conv_workspace_.Reference->data();
1159
+ conv_workspace_.arguments.alpha = problem_.alpha.data();
1160
+ conv_workspace_.arguments.beta = problem_.beta.data();
1161
+ conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
1162
+
1163
+ //
1164
+ // Run device reference operation
1165
+ //
1166
+ status = reference_op->run(
1167
+ &conv_workspace_.arguments,
1168
+ host_workspace_reference_op.data());
1169
+
1170
+
1171
+ // Handle errors
1172
+ if (status != Status::kSuccess) {
1173
+ results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotVerified;
1174
+ return true;
1175
+ }
1176
+
1177
+ //
1178
+ // Verify results
1179
+ //
1180
+ results_.back().verification_map[library::Provider::kReferenceDevice] = compare_tensors(
1181
+ options,
1182
+ *conv_workspace_.Computed,
1183
+ *conv_workspace_.Reference,
1184
+ conv_workspace_.Computed->batch_stride()
1185
+ );
1186
+
1187
+ // Save workspace if incorrect
1188
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
1189
+ results_.back().verification_map[library::Provider::kReferenceDevice] == Disposition::kIncorrect) {
1190
+
1191
+ save_workspace(
1192
+ device_context,
1193
+ options,
1194
+ static_cast<library::ConvDescription const &>(operation->description()),
1195
+ library::Provider::kCUTLASS,
1196
+ library::Provider::kReferenceDevice);
1197
+ }
1198
+
1199
+ // Return true means continue profiling
1200
+ return true;
1201
+ }
1202
+
1203
+ /// Measures performance results
1204
+ bool Conv2dOperationProfiler::profile(
1205
+ Options const &options,
1206
+ PerformanceReport &report,
1207
+ DeviceContext &device_context,
1208
+ library::Operation const *operation,
1209
+ ProblemSpace const &problem_space,
1210
+ ProblemSpace::Problem const &problem) {
1211
+
1212
+
1213
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
1214
+
1215
+ // Initialize structure containing Conv2d arguments
1216
+ conv_workspace_.arguments.A = conv_workspace_.A->data();
1217
+ conv_workspace_.arguments.B = conv_workspace_.B->data();
1218
+ conv_workspace_.arguments.C = conv_workspace_.C->data();
1219
+ conv_workspace_.arguments.D = conv_workspace_.Computed->data();
1220
+ conv_workspace_.arguments.alpha = problem_.alpha.data();
1221
+ conv_workspace_.arguments.beta = problem_.beta.data();
1222
+ conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
1223
+
1224
+ if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
1225
+ // update library::ConvArguments for parallel split-k reduction
1226
+ conv_workspace_.arguments.D = conv_workspace_.device_workspace.data();
1227
+ conv_workspace_.arguments.alpha = problem_.alpha_one.data();
1228
+ conv_workspace_.arguments.beta = problem_.beta_zero.data();
1229
+
1230
+ /// initialize library::ReductionArguments
1231
+ conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
1232
+ conv_workspace_.reduction_arguments.source = conv_workspace_.C->data();
1233
+ conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data();
1234
+ conv_workspace_.reduction_arguments.alpha = problem_.alpha.data();
1235
+ conv_workspace_.reduction_arguments.beta = problem_.beta.data();
1236
+ conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
1237
+ }
1238
+
1239
+ results_.back().status = profile_cutlass_(
1240
+ results_.back().runtime,
1241
+ options,
1242
+ operation,
1243
+ &conv_workspace_.arguments,
1244
+ conv_workspace_.host_workspace.data(),
1245
+ conv_workspace_.device_workspace.data()
1246
+ );
1247
+ }
1248
+ return true;
1249
+
1250
+ }
1251
+
1252
+ /// Method to profile a CUTLASS Operation
1253
+ Status Conv2dOperationProfiler::profile_cutlass_(
1254
+ double &runtime,
1255
+ Options const &options,
1256
+ library::Operation const *operation,
1257
+ void *arguments,
1258
+ void *host_workspace,
1259
+ void *device_workspace) {
1260
+
1261
+ GpuTimer timer;
1262
+
1263
+ // initialize conv2d underlying operation to handle parallel reduction
1264
+ library::Operation const* underlying_operation = operation;
1265
+
1266
+ library::ConvArguments *conv_arguments = static_cast<library::ConvArguments *>(arguments);
1267
+
1268
+ if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
1269
+ if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) {
1270
+ return Status::kErrorNotSupported;
1271
+ }
1272
+ }
1273
+
1274
+ //
1275
+ // Optional sleep to limit power consumption and thermals
1276
+ //
1277
+
1278
+ sleep(options.profiling.sleep_duration);
1279
+
1280
+ //
1281
+ // Warmup loop
1282
+ //
1283
+
1284
+ Status status;
1285
+
1286
+ for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
1287
+
1288
+ // Setup rotating workspace
1289
+ int workspace_idx = options.profiling.warmup_iterations + iteration;
1290
+ int problem_idx = (workspace_idx % conv_workspace_.problem_count);
1291
+
1292
+ conv_arguments->A = conv_workspace_.A->batch_data(problem_idx);
1293
+ conv_arguments->B = conv_workspace_.B->batch_data(problem_idx);
1294
+ conv_arguments->C = conv_workspace_.C->batch_data(problem_idx);
1295
+ conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx);
1296
+
1297
+ if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
1298
+ // update library::ConvArguments for parallel split-k reduction
1299
+ conv_arguments->D = conv_workspace_.device_workspace.data();
1300
+
1301
+ /// initialize library::ReductionArguments
1302
+ conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
1303
+ conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
1304
+ conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
1305
+ }
1306
+
1307
+ // Run underlying conv2d operation
1308
+ status = underlying_operation->run(
1309
+ arguments,
1310
+ host_workspace,
1311
+ device_workspace);
1312
+
1313
+ // Run parallel reduction kernel for parallel split_k_mode
1314
+ if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
1315
+
1316
+ status = reduction_op_->run(
1317
+ &conv_workspace_.reduction_arguments,
1318
+ conv_workspace_.reduction_host_workspace.data(),
1319
+ nullptr);
1320
+ }
1321
+
1322
+ if (status != Status::kSuccess) {
1323
+ return status;
1324
+ }
1325
+ }
1326
+
1327
+ //
1328
+ // Initialize GPU timer
1329
+ //
1330
+
1331
+ timer.start();
1332
+
1333
+ //
1334
+ // Profiling loop
1335
+ //
1336
+
1337
+ int Iterations = options.profiling.iterations;
1338
+
1339
+ int iteration = 0;
1340
+ for (; iteration < Iterations; ++iteration) {
1341
+
1342
+ // Setup rotating workspace
1343
+ int problem_idx = (iteration % conv_workspace_.problem_count);
1344
+
1345
+ conv_arguments->A = conv_workspace_.A->batch_data(problem_idx);
1346
+ conv_arguments->B = conv_workspace_.B->batch_data(problem_idx);
1347
+ conv_arguments->C = conv_workspace_.C->batch_data(problem_idx);
1348
+ conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx);
1349
+
1350
+ if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
1351
+ // update library::ConvArguments for parallel split-k reduction
1352
+ conv_arguments->D = conv_workspace_.device_workspace.data();
1353
+
1354
+ /// initialize library::ReductionArguments
1355
+ conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data();
1356
+ conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx);
1357
+ conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx);
1358
+ }
1359
+
1360
+ // Run underlying conv2d operation
1361
+ status = underlying_operation->run(
1362
+ arguments,
1363
+ host_workspace,
1364
+ device_workspace);
1365
+
1366
+ // Run parallel reduction kernel for parallel split_k_mode
1367
+ if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) {
1368
+
1369
+ status = reduction_op_->run(
1370
+ &conv_workspace_.reduction_arguments,
1371
+ conv_workspace_.reduction_host_workspace.data(),
1372
+ nullptr);
1373
+ }
1374
+
1375
+ if (status != Status::kSuccess) {
1376
+ return status;
1377
+ }
1378
+ }
1379
+
1380
+ //
1381
+ // Wait for completion
1382
+ //
1383
+
1384
+ timer.stop_and_wait();
1385
+
1386
+ //
1387
+ // Update performance result
1388
+ //
1389
+
1390
+ runtime = timer.duration(iteration);
1391
+
1392
+ return status;
1393
+ }
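// [Editor's note, not part of the committed file] timer.duration(iteration) is assumed
// to return the average milliseconds per profiled launch. Downstream reporting can then
// derive throughput along the lines of
//
//   double gflops = double(result.flops) / (runtime * 1.0e6);  // flops / (ms * 1e6) = GFLOP/s
//
// using the flops value computed by Conv2dProblem::flops() during initialize_result_().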
1394
+
1395
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1396
+ #if CUTLASS_ENABLE_CUDNN
1397
+
1398
+ /// Verifies CUTLASS against cudnn reference
1399
+ bool Conv2dOperationProfiler::verify_with_cudnn_(
1400
+ Options const &options,
1401
+ PerformanceReport &report,
1402
+ DeviceContext &device_context,
1403
+ library::Operation const *operation,
1404
+ ProblemSpace const &problem_space,
1405
+ ProblemSpace::Problem const &problem) {
1406
+
1407
+ auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description());
1408
+
1409
+ //
1410
+ // Construct cudnn operators
1411
+ //
1412
+
1413
+ CudnnCreate handle;
1414
+ cudnnStatus_t status = handle.get_cudnn_create_status();
1415
+
1416
+ if (status != CUDNN_STATUS_SUCCESS) {
1417
+
1418
+ results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
1419
+ return true;
1420
+ }
1421
+
1422
+ //
1423
+ // Initialize state
1424
+ //
1425
+
1426
+ // Initialize structure containing Conv2d arguments
1427
+ conv_workspace_.arguments.A = conv_workspace_.A->data();
1428
+ conv_workspace_.arguments.B = conv_workspace_.B->data();
1429
+ conv_workspace_.arguments.D = conv_workspace_.Reference->data();
1430
+ conv_workspace_.arguments.alpha = problem_.alpha.data();
1431
+ conv_workspace_.arguments.beta = problem_.beta.data();
1432
+ conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
1433
+
1434
+ // cuDNN does not support four tensor arguments, so we copy the tensor C data into
1435
+ // tensor D.
1436
+ conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data());
1437
+ conv_workspace_.arguments.C = conv_workspace_.arguments.D;
1438
+
1439
+ try {
1440
+
1441
+ //
1442
+ // Construct dispatcher to cudnn operator
1443
+ //
1444
+
1445
+ detail::cudnnConvDispatcher conv_op(
1446
+ conv_desc,
1447
+ conv_workspace_.configuration,
1448
+ conv_workspace_.arguments,
1449
+ handle
1450
+ );
1451
+
1452
+ if (conv_op.status != Status::kSuccess) {
1453
+ if (conv_op.status == Status::kErrorNotSupported) {
1454
+ results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported;
1455
+
1456
+ } else {
1457
+ results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
1458
+ }
1459
+ return true;
1460
+ }
1461
+
1462
+
1463
+ status = conv_op(handle);
1464
+
1465
+ // Handle errors
1466
+ if (status != CUDNN_STATUS_SUCCESS) {
1467
+
1468
+ results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status);
1469
+ return true;
1470
+ }
1471
+
1472
+ //
1473
+ // Verify results
1474
+ //
1475
+
1476
+ results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors(
1477
+ options,
1478
+ *conv_workspace_.Computed,
1479
+ *conv_workspace_.Reference,
1480
+ conv_workspace_.Computed->batch_stride()
1481
+ );
1482
+
1483
+ // Save workspace if incorrect
1484
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
1485
+ results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) {
1486
+
1487
+ save_workspace(
1488
+ device_context,
1489
+ options,
1490
+ conv_desc,
1491
+ library::Provider::kCUTLASS,
1492
+ library::Provider::kCUDNN);
1493
+ }
1494
+ }
1495
+ catch (...) {
1496
+ results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed;
1497
+ }
1498
+
1499
+ // Return true means continue profiling
1500
+ return true;
1501
+ }
1502
+
1503
+ #endif // #if CUTLASS_ENABLE_CUDNN
1504
+
1505
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1506
+
1507
+ } // namespace profiler
1508
+ } // namespace cutlass
1509
+
1510
+ /////////////////////////////////////////////////////////////////////////////////////////////////
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/cudnn_helpers.cpp ADDED
@@ -0,0 +1,496 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Helper functions for mapping CUTLASS concepts to cuDNN.
33
+ */
34
+ #if CUTLASS_ENABLE_CUDNN
35
+
36
+ #include <stdexcept>
37
+
38
+ #include "cutlass/profiler/cudnn_helpers.h"
39
+
40
+ namespace cutlass {
41
+ namespace profiler {
42
+
43
+ /////////////////////////////////////////////////////////////////////////////////////////////////
44
+ /// Converts a cuDNN status to cutlass::Status
45
+ Status get_cutlass_status(cudnnStatus_t cudnn_status) {
46
+
47
+ if (cudnn_status == CUDNN_STATUS_SUCCESS) {
48
+ return Status::kSuccess;
49
+ }
50
+ else if (cudnn_status == CUDNN_STATUS_INVALID_VALUE) {
51
+ return Status::kErrorInvalidProblem;
52
+ }
53
+ if (cudnn_status == CUDNN_STATUS_NOT_SUPPORTED) {
54
+ return Status::kErrorNotSupported;
55
+ }
56
+ return Status::kErrorInternal;
57
+ }
58
+
59
+ /// Converts a cuDNN status to cutlass::profiler::Disposition
60
+ Disposition get_cutlass_disposition(cudnnStatus_t cudnn_status) {
61
+
62
+ if (cudnn_status == CUDNN_STATUS_INVALID_VALUE) {
63
+ return Disposition::kInvalidProblem;
64
+ }
65
+ else if (cudnn_status == CUDNN_STATUS_NOT_SUPPORTED) {
66
+ return Disposition::kNotSupported;
67
+ }
68
+ return Disposition::kFailed;
69
+ }
70
+
71
+ /// Converts a cudnnStatus_t to a cutlass Status and returns it if it is Status::kSuccess; otherwise throws an exception
72
+ Status checkCudnnErr(cudnnStatus_t cudnn_status) {
73
+ Status cutlass_status = get_cutlass_status(cudnn_status);
74
+ if(cutlass_status != Status::kSuccess) {
75
+ throw std::runtime_error("checkCudnnErr failed");
76
+ }
77
+ return cutlass_status;
78
+ }
79
+
80
+ /// Maps a CUTLASS conv mode to a cuDNN cudnnConvolutionMode_t
81
+ bool get_cudnn_conv_mode(cudnnConvolutionMode_t &cudnn_conv_mode, conv::Mode conv_mode) {
82
+ switch (conv_mode) {
83
+ case conv::Mode::kCrossCorrelation:
84
+ cudnn_conv_mode = CUDNN_CROSS_CORRELATION;
85
+ return true;
86
+ case conv::Mode::kConvolution:
87
+ cudnn_conv_mode = CUDNN_CONVOLUTION;
88
+ return true;
89
+ default: break;
90
+ }
91
+ return false;
92
+ }
93
+
94
+ /// Maps a CUTLASS tensor layout to a cuDNN cudnnTensorFormat_t
95
+ bool get_cudnn_layout(cudnnTensorFormat_t &cudnn_layout, library::LayoutTypeID layout) {
96
+ switch (layout) {
97
+ // cudnn uses the same enum for TensorNC*HW along nDim (ConvDescription::conv_dim)
98
+ case library::LayoutTypeID::kTensorNCHW:
99
+ case library::LayoutTypeID::kTensorNCDHW:
100
+ cudnn_layout = CUDNN_TENSOR_NCHW;
101
+ return true;
102
+ case library::LayoutTypeID::kTensorNHWC:
103
+ case library::LayoutTypeID::kTensorNDHWC:
104
+ cudnn_layout = CUDNN_TENSOR_NHWC;
105
+ return true;
106
+ default: break;
107
+ }
108
+ return false;
109
+ }
110
+
111
+ /// Maps a CUTLASS numeric type to a cuDNN cudnnDataType_t
112
+ bool get_cudnn_datatype(cudnnDataType_t &cudnn_element_type, library::NumericTypeID element_type) {
113
+ switch (element_type) {
114
+ case library::NumericTypeID::kF16:
115
+ cudnn_element_type = CUDNN_DATA_HALF;
116
+ return true;
117
+
118
+ case library::NumericTypeID::kF32:
119
+ cudnn_element_type = CUDNN_DATA_FLOAT;
120
+ return true;
121
+
122
+ case library::NumericTypeID::kF64:
123
+ cudnn_element_type = CUDNN_DATA_DOUBLE;
124
+ return true;
125
+
126
+ case library::NumericTypeID::kS2:
127
+ break;
128
+
129
+ case library::NumericTypeID::kS4:
130
+ break;
131
+
132
+ case library::NumericTypeID::kS8:
133
+ cudnn_element_type = CUDNN_DATA_INT8;
134
+ return true;
135
+
136
+ case library::NumericTypeID::kS16:
137
+ break;
138
+
139
+ case library::NumericTypeID::kS32:
140
+ cudnn_element_type = CUDNN_DATA_INT32;
141
+ return true;
142
+
143
+ case library::NumericTypeID::kS64:
144
+ break;
145
+
146
+ case library::NumericTypeID::kU2:
147
+ break;
148
+
149
+ case library::NumericTypeID::kU4:
150
+ break;
151
+
152
+ case library::NumericTypeID::kU8:
153
+ cudnn_element_type = CUDNN_DATA_UINT8;
154
+ return true;
155
+
156
+ case library::NumericTypeID::kU16:
157
+ break;
158
+
159
+ case library::NumericTypeID::kU32:
160
+ break;
161
+
162
+ case library::NumericTypeID::kU64:
163
+ break;
164
+
165
+ case library::NumericTypeID::kB1:
166
+ break;
167
+
168
+ case library::NumericTypeID::kInvalid:
169
+
170
+ default:
171
+ break;
172
+ }
173
+
174
+ return false;
175
+ }
176
+
177
+ /// Maps CUTLASS math OpcodeClassID and MathOperationID to cuDNN math_type
178
+ bool get_cudnn_mathtype(cudnnMathType_t &cudnn_math_type, library::ConvDescription const &conv_desc) {
179
+
180
+ switch (conv_desc.tile_description.math_instruction.opcode_class) {
181
+
182
+ case library::OpcodeClassID::kTensorOp:
183
+ {
184
+ cudnn_math_type = CUDNN_TENSOR_OP_MATH;
185
+
186
+ library::MathOperationID math_op = conv_desc.tile_description.math_instruction.math_operation;
187
+
188
+ // Allow conversion on input data type for fast math operations
189
+ if (math_op == library::MathOperationID::kMultiplyAddFastF16 ||
190
+ math_op == library::MathOperationID::kMultiplyAddFastBF16)
191
+ {
192
+ cudnn_math_type = CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION;
193
+ }
194
+
195
+ return true;
196
+ }
197
+ case library::OpcodeClassID::kSimt:
198
+ #if (defined(CUDNN_VERSION) && CUDNN_VERSION <= 8000)
199
+ cudnn_math_type = CUDNN_DEFAULT_MATH;
200
+ #else
201
+ cudnn_math_type = CUDNN_FMA_MATH;
202
+ #endif
203
+ return true;
204
+ }
205
+
206
+ return false;
207
+ }
208
+
209
+ /// The cuDNN compute type is hardcoded to float here (to work around a possible cuDNN issue)
210
+ float cast_cudnn_compute_type_to_float(library::NumericTypeID type, void const * src) {
211
+
212
+ switch (type) {
213
+ case library::NumericTypeID::kF16:
214
+ {
215
+ return float(*(static_cast<half_t const*>(src)));
216
+ }
217
+ case library::NumericTypeID::kF32:
218
+ {
219
+ return float(*(static_cast<float const*>(src)));
220
+ }
221
+ case library::NumericTypeID::kS32:
222
+ {
223
+ return float(*(static_cast<int const*>(src)));
224
+ }
225
+ default:
226
+ throw std::runtime_error("Data type not handled in cast_cudnn_compute_type_to_float");
227
+ }
228
+ }
229
+
230
+ /////////////////////////////////////////////////////////////////////////////////////////////////
231
+ /// Returns a status if cuDNN can satisfy a particular Conv2d description
232
+ Status cudnn_satisfies(
233
+ library::ConvDescription const &desc,
234
+ library::Conv2dConfiguration const &configuration) {
235
+
236
+ auto const &a_tensor = desc.A;
237
+ auto const &b_tensor = desc.B;
238
+ auto const &c_tensor = desc.C;
239
+ auto const &math_instruction = desc.tile_description.math_instruction;
240
+
241
+ if(a_tensor.element != b_tensor.element) {
242
+ return Status::kErrorInvalidDataType;
243
+ }
244
+
245
+ //////////////////////// Convolution output dimensions p and q ///////////////////////
246
+ // Cutlass convolutions support arbitrary output dimensions and are not constrained by //
247
+ // input, filter, padding, striding, dilation sizes. //
248
+ // cuDNN sets the output dimensions (p, q) using following equations: //
249
+ // //
250
+ // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
251
+ // where; div_up(a, b) : (a - 1)/b + 1 //
252
+ // //
253
+ // Before launching cudnn verification or profiling check that output p and q //
254
+ // dimensions are cuDNN compliant. //
255
+ // //
256
+ // If user sets output p and q which do not follow above constraints, cutlass conv, //
257
+ // host reference, device reference can run. However, cudnn convolution returns //
258
+ // "Invalid problem" //
259
+ // //
260
+ ///////////////////////////////////////////////////////////////////////////////////////
261
+
262
+ // check conv output dimension p for cudnn
263
+ int cudnn_output_p =
264
+ (
265
+ (
266
+ configuration.problem_size.H +
267
+ 2 * configuration.problem_size.pad_h -
268
+ ((configuration.problem_size.R - 1) *
269
+ configuration.problem_size.dilation_h + 1)
270
+ ) /
271
+ (configuration.problem_size.stride_h)
272
+ + 1
273
+ );
274
+
275
+ if (cudnn_output_p != configuration.problem_size.P) {
276
+ return Status::kErrorInvalidProblem;
277
+ }
278
+
279
+ // check conv output dimension q for cudnn
280
+ int cudnn_output_q =
281
+ (
282
+ (
283
+ configuration.problem_size.W +
284
+ 2 * configuration.problem_size.pad_w -
285
+ ((configuration.problem_size.S - 1) *
286
+ configuration.problem_size.dilation_w + 1)
287
+ ) /
288
+ (configuration.problem_size.stride_w)
289
+ + 1
290
+ );
291
+
292
+ if (cudnn_output_q != configuration.problem_size.Q) {
293
+ return Status::kErrorInvalidProblem;
294
+ }
295
+ //////////////////////////////////////////////////////////////////////////////////////
296
+
297
+ // conv operator with input=FP16, accumulator=FP32, output=FP32 datatype
298
+ if (a_tensor.element == library::NumericTypeID::kF16 &&
299
+ b_tensor.element == library::NumericTypeID::kF16 &&
300
+ math_instruction.element_accumulator == library::NumericTypeID::kF32 &&
301
+ c_tensor.element == library::NumericTypeID::kF32
302
+ ) {
303
+
304
+ return Status::kErrorNotSupported;
305
+ }
306
+
307
+ if (a_tensor.element == library::NumericTypeID::kBF16 ||
308
+ b_tensor.element == library::NumericTypeID::kBF16 ||
309
+ c_tensor.element == library::NumericTypeID::kBF16
310
+ ) {
311
+
312
+ return Status::kErrorNotSupported;
313
+ }
314
+
315
+ // TF32 input not supported in cuDNN
316
+ if (a_tensor.element == library::NumericTypeID::kTF32 ||
317
+ b_tensor.element == library::NumericTypeID::kTF32 ||
318
+ c_tensor.element == library::NumericTypeID::kTF32
319
+ ) {
320
+
321
+ return Status::kErrorNotSupported;
322
+ }
323
+
324
+ if (a_tensor.element == library::NumericTypeID::kS8 ||
325
+ b_tensor.element == library::NumericTypeID::kS8 ||
326
+ c_tensor.element == library::NumericTypeID::kS8
327
+ ) {
328
+
329
+ return Status::kErrorNotSupported;
330
+ }
331
+
332
+ if (a_tensor.element == library::NumericTypeID::kU8 ||
333
+ b_tensor.element == library::NumericTypeID::kU8 ||
334
+ c_tensor.element == library::NumericTypeID::kU8
335
+ ) {
336
+
337
+ return Status::kErrorNotSupported;
338
+ }
339
+
340
+ if (a_tensor.element == library::NumericTypeID::kS4 ||
341
+ b_tensor.element == library::NumericTypeID::kS4 ||
342
+ c_tensor.element == library::NumericTypeID::kS4
343
+ ) {
344
+
345
+ return Status::kErrorNotSupported;
346
+ }
347
+
348
+ if (a_tensor.element == library::NumericTypeID::kU4 ||
349
+ b_tensor.element == library::NumericTypeID::kU4 ||
350
+ c_tensor.element == library::NumericTypeID::kU4
351
+ ) {
352
+
353
+ return Status::kErrorNotSupported;
354
+ }
355
+
356
+ return Status::kSuccess;
357
+ }
358
+
359
+ /////////////////////////////////////////////////////////////////////////////////////////////////
360
+
361
+ /// Returns a status if cuDNN can satisfy a particular Conv3d description
362
+ Status cudnn_satisfies(
363
+ library::ConvDescription const &desc,
364
+ library::Conv3dConfiguration const &configuration) {
365
+
366
+ auto const &a_tensor = desc.A;
367
+ auto const &b_tensor = desc.B;
368
+ auto const &c_tensor = desc.C;
369
+ auto const &math_instruction = desc.tile_description.math_instruction;
370
+
371
+ if(a_tensor.element != b_tensor.element) {
372
+ return Status::kErrorInvalidDataType;
373
+ }
374
+
375
+ //////////////////////// Convolution output dimensions p and q ///////////////////////
376
+ // Cutlass convolutions support arbitrary output dimensions and are not constrained by //
377
+ // input, filter, padding, striding, dilation sizes. //
378
+ // cuDNN sets the output dimensions (p, q) using following equations: //
379
+ // //
380
+ // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) //
381
+ // where; div_up(a, b) : (a - 1)/b + 1 //
382
+ // //
383
+ // Before launching cudnn verification or profiling check that output p and q //
384
+ // dimensions are cuDNN compliant. //
385
+ // //
386
+ // If user sets output p and q which do not follow above constraints, cutlass conv, //
387
+ // host reference, device reference can run. However, cudnn convolution returns //
388
+ // "Invalid problem" //
389
+ // //
390
+ ///////////////////////////////////////////////////////////////////////////////////////
391
+
392
+ // check conv output dimension z for cudnn
393
+ int cudnn_output_z =
394
+ (
395
+ (
396
+ configuration.problem_size.D +
397
+ 2 * configuration.problem_size.pad_d -
398
+ ((configuration.problem_size.T - 1) *
399
+ configuration.problem_size.dilation_d + 1)
400
+ ) /
401
+ (configuration.problem_size.stride_d)
402
+ + 1
403
+ );
404
+
405
+ if (cudnn_output_z != configuration.problem_size.Z) {
406
+ return Status::kErrorInvalidProblem;
407
+ }
408
+
409
+ // check conv output dimension p for cudnn
410
+ int cudnn_output_p =
411
+ (
412
+ (
413
+ configuration.problem_size.H +
414
+ 2 * configuration.problem_size.pad_h -
415
+ ((configuration.problem_size.R - 1) *
416
+ configuration.problem_size.dilation_h + 1)
417
+ ) /
418
+ (configuration.problem_size.stride_h)
419
+ + 1
420
+ );
421
+
422
+ if (cudnn_output_p != configuration.problem_size.P) {
423
+ return Status::kErrorInvalidProblem;
424
+ }
425
+
426
+ // check conv output dimension q for cudnn
427
+ int cudnn_output_q =
428
+ (
429
+ (
430
+ configuration.problem_size.W +
431
+ 2 * configuration.problem_size.pad_w -
432
+ ((configuration.problem_size.S - 1) *
433
+ configuration.problem_size.dilation_w + 1)
434
+ ) /
435
+ (configuration.problem_size.stride_w)
436
+ + 1
437
+ );
438
+
439
+ if (cudnn_output_q != configuration.problem_size.Q) {
440
+ return Status::kErrorInvalidProblem;
441
+ }
442
+ //////////////////////////////////////////////////////////////////////////////////////
443
+
444
+ // conv operator with input, accumulator, output datatype of (hss) are not supported
445
+ // in cuDNN
446
+ if (a_tensor.element == library::NumericTypeID::kF16 &&
447
+ b_tensor.element == library::NumericTypeID::kF16 &&
448
+ math_instruction.element_accumulator == library::NumericTypeID::kF32 &&
449
+ c_tensor.element == library::NumericTypeID::kF32
450
+ ) {
451
+
452
+ return Status::kErrorNotSupported;
453
+ }
454
+
455
+ if (a_tensor.element == library::NumericTypeID::kBF16 ||
456
+ b_tensor.element == library::NumericTypeID::kBF16 ||
457
+ c_tensor.element == library::NumericTypeID::kBF16
458
+ ) {
459
+
460
+ return Status::kErrorNotSupported;
461
+ }
462
+
463
+ if (a_tensor.element == library::NumericTypeID::kTF32 ||
464
+ b_tensor.element == library::NumericTypeID::kTF32 ||
465
+ c_tensor.element == library::NumericTypeID::kTF32
466
+ ) {
467
+
468
+ return Status::kErrorNotSupported;
469
+ }
470
+
471
+ if (a_tensor.element == library::NumericTypeID::kS8 ||
472
+ b_tensor.element == library::NumericTypeID::kS8 ||
473
+ c_tensor.element == library::NumericTypeID::kS8
474
+ ) {
475
+
476
+ return Status::kErrorNotSupported;
477
+ }
478
+
479
+ // S4 not supported in cuDNN
480
+ if (a_tensor.element == library::NumericTypeID::kS4 ||
481
+ b_tensor.element == library::NumericTypeID::kS4 ||
482
+ c_tensor.element == library::NumericTypeID::kS4
483
+ ) {
484
+
485
+ return Status::kErrorNotSupported;
486
+ }
487
+
488
+ return Status::kSuccess;
489
+ }
490
+
491
+ /////////////////////////////////////////////////////////////////////////////////////////////////
492
+
493
+ } // namespace profiler
494
+ } // namespace cutlass
495
+
496
+ #endif
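Both cudnn_satisfies() overloads above recompute the output extent cuDNN would infer and reject problems whose user-specified P, Q (and Z for Conv3d) differ from it. A short worked check of that formula, with illustrative sizes only:

#include <cassert>

// output = (input + 2*pad - ((filter - 1)*dilation + 1)) / stride + 1,
// the same expression used in cudnn_satisfies(); the sizes below are examples.
int conv_output_extent(int input, int pad, int filter, int dilation, int stride) {
  return (input + 2 * pad - ((filter - 1) * dilation + 1)) / stride + 1;
}

int main() {
  assert(conv_output_extent(56, 1, 3, 1, 1) == 56);    // H=56,  R=3, pad=1, stride=1 -> P=56
  assert(conv_output_extent(224, 3, 7, 1, 2) == 112);  // H=224, R=7, pad=3, stride=2 -> P=112
  return 0;
}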
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/cutlass_profiler.cu ADDED
@@ -0,0 +1,214 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+ */
34
+
35
+ #include <iostream>
36
+ #include <stdexcept>
37
+
38
+ // Profiler includes
39
+ #include "cutlass/profiler/cutlass_profiler.h"
40
+ #include "cutlass/profiler/gemm_operation_profiler.h"
41
+ #include "cutlass/profiler/rank_k_operation_profiler.h"
42
+ #include "cutlass/profiler/rank_2k_operation_profiler.h"
43
+ #include "cutlass/profiler/trmm_operation_profiler.h"
44
+ #include "cutlass/profiler/symm_operation_profiler.h"
45
+ #include "cutlass/profiler/conv2d_operation_profiler.h"
46
+ #include "cutlass/profiler/conv3d_operation_profiler.h"
47
+ #include "cutlass/profiler/sparse_gemm_operation_profiler.h"
48
+
49
+ /////////////////////////////////////////////////////////////////////////////////////////////////
50
+
51
+ namespace cutlass {
52
+ namespace profiler {
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ CutlassProfiler::CutlassProfiler(
57
+ Options const &options
58
+ ):
59
+ options_(options) {
60
+
61
+ operation_profilers_.emplace_back(new GemmOperationProfiler(options));
62
+
63
+ operation_profilers_.emplace_back(new SparseGemmOperationProfiler(options));
64
+
65
+ operation_profilers_.emplace_back(new Conv2dOperationProfiler(options));
66
+
67
+ operation_profilers_.emplace_back(new Conv3dOperationProfiler(options));
68
+
69
+ operation_profilers_.emplace_back(new RankKOperationProfiler(options));
70
+
71
+ operation_profilers_.emplace_back(new Rank2KOperationProfiler(options));
72
+
73
+ operation_profilers_.emplace_back(new TrmmOperationProfiler(options));
74
+
75
+ operation_profilers_.emplace_back(new SymmOperationProfiler(options));
76
+ }
77
+
78
+ CutlassProfiler::~CutlassProfiler() {
79
+
80
+ }
81
+
82
+ /////////////////////////////////////////////////////////////////////////////////////////////////
83
+
84
+ /// Execute the program
85
+ int CutlassProfiler::operator()() {
86
+
87
+ if (options_.cmdline.num_naked_args() > 0) {
88
+ std::cerr << "Unknown args: \n";
89
+ options_.cmdline.print_naked_args(std::cerr);
90
+ std::cerr << "\n\n\n";
91
+
92
+ print_usage_(std::cout);
93
+ return 1;
94
+ }
95
+
96
+ if (options_.about.help) {
97
+ if (options_.operation_kind == library::OperationKind::kInvalid) {
98
+ print_usage_(std::cout);
99
+ }
100
+ else {
101
+ for (auto & profiler : operation_profilers_) {
102
+ if (profiler->kind() == options_.operation_kind) {
103
+ profiler->print_usage(std::cout);
104
+ profiler->print_examples(std::cout);
105
+ return 0;
106
+ }
107
+ }
108
+ }
109
+ return 0;
110
+ }
111
+ else if (options_.about.version) {
112
+ options_.about.print_version(std::cout);
113
+
114
+ std::cout << std::endl;
115
+ return 0;
116
+ }
117
+ else if (options_.about.device_info) {
118
+ options_.device.print_device_info(std::cout);
119
+ return 0;
120
+ }
121
+
122
+ if (options_.execution_mode == ExecutionMode::kProfile ||
123
+ options_.execution_mode == ExecutionMode::kDryRun ||
124
+ options_.execution_mode == ExecutionMode::kTrace) {
125
+
126
+ // Profiles all operations
127
+ return profile_();
128
+ }
129
+ else if (options_.execution_mode == ExecutionMode::kEnumerate) {
130
+ // Enumerates all operations
131
+ enumerate_();
132
+ }
133
+ return 0;
134
+ }
135
+
136
+ /////////////////////////////////////////////////////////////////////////////////////////////////
137
+
138
+ /// Enumerates all operations
139
+ void CutlassProfiler::enumerate_() {
140
+
141
+ }
142
+
143
+ /// Profiles all operations
144
+ int CutlassProfiler::profile_() {
145
+
146
+ // Keep track of all device memory tensor in map
147
+ DeviceContext device_context;
148
+
149
+ int result = 0;
150
+ // For all profilers (e.g. gemm/sparse_gemm/conv2d...)
151
+ for (auto & profiler : operation_profilers_) {
152
+
153
+ if (options_.operation_kind == library::OperationKind::kInvalid ||
154
+ options_.operation_kind == profiler->kind()) {
155
+
156
+ result = profiler->profile_all(options_, library::Singleton::get().manifest, device_context);
157
+
158
+ // If some profile failed, terminate immediately
159
+ if (result) {
160
+ return result;
161
+ }
162
+ }
163
+ }
164
+
165
+ return result;
166
+ }
167
+
168
+ /////////////////////////////////////////////////////////////////////////////////////////////////
169
+
170
+ /// Prints all options
171
+ void CutlassProfiler::print_usage_(std::ostream &out) {
172
+ options_.print_usage(out);
173
+
174
+ out << "\nOperations:\n\n";
175
+
176
+ // For all profilers
177
+ for (auto & profiler : operation_profilers_) {
178
+
179
+
180
+ std::string kind_str = library::to_string(profiler->kind());
181
+
182
+ size_t kAlignment = 40;
183
+ size_t columns = 0;
184
+
185
+ if (kind_str.size() < kAlignment) {
186
+ columns = kAlignment - kind_str.size();
187
+ }
188
+
189
+ out << " " << kind_str << std::string(columns, ' ') << profiler->description() << "\n";
190
+
191
+ }
192
+
193
+ out << "\n\nFor details about a particular function, specify the function name with --help.\n\nExample:\n\n"
194
+ << " $ cutlass_profiler --operation=Gemm --help\n\n"
195
+ << " $ cutlass_profiler --operation=RankK --help\n\n"
196
+ << " $ cutlass_profiler --operation=Trmm --help\n\n"
197
+ << " $ cutlass_profiler --operation=Symm --help\n\n"
198
+ << " $ cutlass_profiler --operation=Conv3d --help\n\n"
199
+ << " $ cutlass_profiler --operation=Conv2d --help\n\n"
200
+ << " $ cutlass_profiler --operation=SparseGemm --help\n\n"
201
+ ;
202
+ }
203
+
204
+ /// Prints usage
205
+ void CutlassProfiler::print_options_(std::ostream &out) {
206
+ options_.print_options(out);
207
+ }
208
+
209
+ /////////////////////////////////////////////////////////////////////////////////////////////////
210
+
211
+ } // namespace profiler
212
+ } // namespace cutlass
213
+
214
+ /////////////////////////////////////////////////////////////////////////////////////////////////
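CutlassProfiler::profile_() above walks every registered operation profiler, runs those whose kind matches the requested operation (or all of them when the kind is kInvalid), and stops at the first non-zero result. A stripped-down sketch of that dispatch; ProfilerBase and Kind are illustrative stand-ins, not the profiler's own types.

#include <memory>
#include <vector>

enum class Kind { kInvalid, kGemm, kConv2d };

struct ProfilerBase {
  virtual ~ProfilerBase() = default;
  virtual Kind kind() const = 0;
  virtual int profile_all() = 0;   // non-zero return signals failure
};

int profile(std::vector<std::unique_ptr<ProfilerBase>> const &profilers, Kind requested) {
  int result = 0;
  for (auto const &profiler : profilers) {
    if (requested == Kind::kInvalid || requested == profiler->kind()) {
      result = profiler->profile_all();
      if (result) {
        return result;             // first failure terminates the sweep
      }
    }
  }
  return result;
}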
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/device_allocation.cu ADDED
@@ -0,0 +1,2483 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+ */
34
+
35
+ #include <cstring>
36
+
37
+ #include "cutlass/numeric_types.h"
38
+ #include "cutlass/layout/matrix.h"
39
+ #include "cutlass/layout/tensor.h"
40
+
41
+ #include "cutlass/util/reference/device/tensor_compare.h"
42
+ #include "cutlass/util/reference/device/tensor_fill.h"
43
+ #include "cutlass/util/reference/host/tensor_fill.h"
44
+ #include "cutlass/util/host_tensor.h"
45
+ #include "cutlass/util/tensor_view_io.h"
46
+
47
+ #include "cutlass/library/util.h"
48
+
49
+ #include "cutlass/profiler/device_allocation.h"
50
+
51
+ namespace cutlass {
52
+ namespace profiler {
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ size_t DeviceAllocation::bytes(library::NumericTypeID type, size_t capacity) {
57
+ return size_t(cutlass::library::sizeof_bits(type)) * capacity / 8;
58
+ }
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
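The bytes() helper above scales the element count by the element width in bits, so sub-byte types pack correctly. Illustrative arithmetic with the standard CUTLASS bit widths:

  kF16 (16 bits), capacity 1000  ->  16 * 1000 / 8 = 2000 bytes
  kS4  ( 4 bits), capacity 1024  ->   4 * 1024 / 8 =  512 bytes
  kB1  ( 1 bit ), capacity 4096  ->   1 * 4096 / 8 =  512 bytes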
61
+
62
+ template <typename Layout>
63
+ static std::vector<int64_t> get_packed_layout_stride(std::vector<int> const &extent) {
64
+
65
+ typename Layout::TensorCoord extent_coord;
66
+ typename Layout::Stride stride_coord;
67
+
68
+ if (extent.size() != size_t(Layout::kRank)) {
69
+ throw std::runtime_error("Layout does not have same rank as extent vector.");
70
+ }
71
+
72
+ for (int i = 0; i < Layout::kRank; ++i) {
73
+ extent_coord[i] = extent.at(i);
74
+ }
75
+
76
+ std::vector<int64_t> stride;
77
+ stride.resize(Layout::kStrideRank, 0);
78
+
79
+ Layout layout = Layout::packed(extent_coord);
80
+ stride_coord = layout.stride();
81
+
82
+ for (int i = 0; i < Layout::kStrideRank; ++i) {
83
+ stride.at(i) = (int64_t)stride_coord[i];
84
+ }
85
+
86
+ return stride;
87
+ }
88
+
89
+ /// Returns the stride of a packed layout
90
+ std::vector<int64_t> DeviceAllocation::get_packed_layout(
91
+ library::LayoutTypeID layout_id,
92
+ std::vector<int> const &extent) {
93
+
94
+ std::vector<int64_t> stride;
95
+
96
+ switch (layout_id) {
97
+ case library::LayoutTypeID::kColumnMajor:
98
+ stride = get_packed_layout_stride<cutlass::layout::ColumnMajor>(extent);
99
+ break;
100
+ case library::LayoutTypeID::kRowMajor:
101
+ stride = get_packed_layout_stride<cutlass::layout::RowMajor>(extent);
102
+ break;
103
+ case library::LayoutTypeID::kColumnMajorInterleavedK2:
104
+ stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<2>>(extent);
105
+ break;
106
+ case library::LayoutTypeID::kRowMajorInterleavedK2:
107
+ stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<2>>(extent);
108
+ break;
109
+ case library::LayoutTypeID::kColumnMajorInterleavedK4:
110
+ stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<4>>(extent);
111
+ break;
112
+ case library::LayoutTypeID::kRowMajorInterleavedK4:
113
+ stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<4>>(extent);
114
+ break;
115
+ case library::LayoutTypeID::kColumnMajorInterleavedK16:
116
+ stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<16>>(extent);
117
+ break;
118
+ case library::LayoutTypeID::kRowMajorInterleavedK16:
119
+ stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<16>>(extent);
120
+ break;
121
+ case library::LayoutTypeID::kColumnMajorInterleavedK32:
122
+ stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<32>>(extent);
123
+ break;
124
+ case library::LayoutTypeID::kRowMajorInterleavedK32:
125
+ stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<32>>(extent);
126
+ break;
127
+ case library::LayoutTypeID::kColumnMajorInterleavedK64:
128
+ stride = get_packed_layout_stride<cutlass::layout::ColumnMajorInterleaved<64>>(extent);
129
+ break;
130
+ case library::LayoutTypeID::kRowMajorInterleavedK64:
131
+ stride = get_packed_layout_stride<cutlass::layout::RowMajorInterleaved<64>>(extent);
132
+ break;
133
+ case library::LayoutTypeID::kTensorNCHW:
134
+ stride = get_packed_layout_stride<cutlass::layout::TensorNCHW>(extent);
135
+ break;
136
+ case library::LayoutTypeID::kTensorNHWC:
137
+ stride = get_packed_layout_stride<cutlass::layout::TensorNHWC>(extent);
138
+ break;
139
+ case library::LayoutTypeID::kTensorNDHWC:
140
+ stride = get_packed_layout_stride<cutlass::layout::TensorNDHWC>(extent);
141
+ break;
142
+ case library::LayoutTypeID::kTensorNC32HW32:
143
+ stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<32>>(extent);
144
+ break;
145
+ case library::LayoutTypeID::kTensorNC64HW64:
146
+ stride = get_packed_layout_stride<cutlass::layout::TensorNCxHWx<64>>(extent);
147
+ break;
148
+ case library::LayoutTypeID::kTensorC32RSK32:
149
+ stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<32>>(extent);
150
+ break;
151
+ case library::LayoutTypeID::kTensorC64RSK64:
152
+ stride = get_packed_layout_stride<cutlass::layout::TensorCxRSKx<64>>(extent);
153
+ break;
154
+ default: break;
155
+ }
156
+
157
+ return stride;
158
+ }
159
+
160
+ /////////////////////////////////////////////////////////////////////////////////////////////////
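For intuition, get_packed_layout() above simply forwards to each layout's packed() factory; for the plain matrix layouts the resulting stride is the leading dimension. Illustrative values (the extent is arbitrary):

  RowMajor::packed(MatrixCoord(3, 5))    -> stride {5}   (leading dimension = columns)
  ColumnMajor::packed(MatrixCoord(3, 5)) -> stride {3}   (leading dimension = rows)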
161
+
162
+ /// Template that uses CUTLASS Layout functions to construct a layout in the provided buffer and return its capacity
163
+ template <typename Layout>
164
+ static size_t construct_layout_(
165
+ void *bytes,
166
+ library::LayoutTypeID layout_id,
167
+ std::vector<int> const &extent,
168
+ std::vector<int64_t> &stride) {
169
+
170
+ if (extent.size() != Layout::kRank) {
171
+ throw std::runtime_error(
172
+ "Layout must have same rank as extent vector.");
173
+ }
174
+
175
+ if (Layout::kStrideRank && stride.empty()) {
176
+
177
+ stride = get_packed_layout_stride<Layout>(extent);
178
+
179
+ return construct_layout_<Layout>(
180
+ bytes,
181
+ layout_id,
182
+ extent,
183
+ stride);
184
+ }
185
+ else if (Layout::kStrideRank && stride.size() != Layout::kStrideRank) {
186
+ throw std::runtime_error(
187
+ "Layout requires either empty stride or stride vector matching Layout::kStrideRank");
188
+ }
189
+
190
+ typename Layout::Stride stride_coord;
191
+ for (int i = 0; i < Layout::kStrideRank; ++i) {
192
+ stride_coord[i] = (int)stride.at(i);
193
+ }
194
+
195
+ typename Layout::TensorCoord extent_coord;
196
+ for (int i = 0; i < Layout::kRank; ++i) {
197
+ extent_coord[i] = extent.at(i);
198
+ }
199
+
200
+ // Construct the CUTLASS layout object from the stride object
201
+ Layout layout(stride_coord);
202
+
203
+ // Pack it into bytes
204
+ if (bytes) {
205
+ *reinterpret_cast<Layout *>(bytes) = layout;
206
+ }
207
+
208
+ // Return capacity
209
+ size_t capacity_ = layout.capacity(extent_coord);
210
+
211
+ return capacity_;
212
+ }
213
+
214
+ /// returns the capacity needed
215
+ size_t DeviceAllocation::construct_layout(
216
+ void *bytes,
217
+ library::LayoutTypeID layout_id,
218
+ std::vector<int> const &extent,
219
+ std::vector<int64_t> &stride) {
220
+
221
+ switch (layout_id) {
222
+ case library::LayoutTypeID::kColumnMajor:
223
+ return construct_layout_<cutlass::layout::ColumnMajor>(bytes, layout_id, extent, stride);
224
+
225
+ case library::LayoutTypeID::kRowMajor:
226
+ return construct_layout_<cutlass::layout::RowMajor>(bytes, layout_id, extent, stride);
227
+
228
+ case library::LayoutTypeID::kColumnMajorInterleavedK2:
229
+ return construct_layout_<cutlass::layout::ColumnMajorInterleaved<2>>(bytes, layout_id, extent, stride);
230
+
231
+ case library::LayoutTypeID::kRowMajorInterleavedK2:
232
+ return construct_layout_<cutlass::layout::RowMajorInterleaved<2>>(bytes, layout_id, extent, stride);
233
+
234
+ case library::LayoutTypeID::kColumnMajorInterleavedK4:
235
+ return construct_layout_<cutlass::layout::ColumnMajorInterleaved<4>>(bytes, layout_id, extent, stride);
236
+
237
+ case library::LayoutTypeID::kRowMajorInterleavedK4:
238
+ return construct_layout_<cutlass::layout::RowMajorInterleaved<4>>(bytes, layout_id, extent, stride);
239
+
240
+ case library::LayoutTypeID::kColumnMajorInterleavedK16:
241
+ return construct_layout_<cutlass::layout::ColumnMajorInterleaved<16>>(bytes, layout_id, extent, stride);
242
+
243
+ case library::LayoutTypeID::kRowMajorInterleavedK16:
244
+ return construct_layout_<cutlass::layout::RowMajorInterleaved<16>>(bytes, layout_id, extent, stride);
245
+
246
+ case library::LayoutTypeID::kColumnMajorInterleavedK32:
247
+ return construct_layout_<cutlass::layout::ColumnMajorInterleaved<32>>(bytes, layout_id, extent, stride);
248
+
249
+ case library::LayoutTypeID::kRowMajorInterleavedK32:
250
+ return construct_layout_<cutlass::layout::RowMajorInterleaved<32>>(bytes, layout_id, extent, stride);
251
+
252
+ case library::LayoutTypeID::kColumnMajorInterleavedK64:
253
+ return construct_layout_<cutlass::layout::ColumnMajorInterleaved<64>>(bytes, layout_id, extent, stride);
254
+
255
+ case library::LayoutTypeID::kRowMajorInterleavedK64:
256
+ return construct_layout_<cutlass::layout::RowMajorInterleaved<64>>(bytes, layout_id, extent, stride);
257
+
258
+ case library::LayoutTypeID::kTensorNCHW:
259
+ return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
260
+
261
+ case library::LayoutTypeID::kTensorNHWC:
262
+ return construct_layout_<cutlass::layout::TensorNHWC>(bytes, layout_id, extent, stride);
263
+
264
+ case library::LayoutTypeID::kTensorNDHWC:
265
+ return construct_layout_<cutlass::layout::TensorNDHWC>(bytes, layout_id, extent, stride);
266
+
267
+ case library::LayoutTypeID::kTensorNC32HW32:
268
+ return construct_layout_<cutlass::layout::TensorNCxHWx<32>>(bytes, layout_id, extent, stride);
269
+
270
+ case library::LayoutTypeID::kTensorNC64HW64:
271
+ return construct_layout_<cutlass::layout::TensorNCxHWx<64>>(bytes, layout_id, extent, stride);
272
+
273
+ case library::LayoutTypeID::kTensorC32RSK32:
274
+ return construct_layout_<cutlass::layout::TensorCxRSKx<32>>(bytes, layout_id, extent, stride);
275
+
276
+ case library::LayoutTypeID::kTensorC64RSK64:
277
+ return construct_layout_<cutlass::layout::TensorCxRSKx<64>>(bytes, layout_id, extent, stride);
278
+
279
+ default: break;
280
+ }
281
+
282
+ return 0;
283
+ }
284
+
285
+ /////////////////////////////////////////////////////////////////////////////////////////////////
286
+
287
+ DeviceAllocation::DeviceAllocation():
288
+ type_(library::NumericTypeID::kInvalid),
289
+ batch_stride_(0),
290
+ capacity_(0),
291
+ pointer_(nullptr),
292
+ layout_(library::LayoutTypeID::kUnknown),
293
+ batch_count_(1),
294
+ device_(-1) {
295
+
296
+ }
297
+
298
+ DeviceAllocation::DeviceAllocation(
299
+ library::NumericTypeID type,
300
+ size_t capacity,
301
+ int device
302
+ ):
303
+ type_(type), batch_stride_(capacity), capacity_(capacity), pointer_(nullptr),
304
+ layout_(library::LayoutTypeID::kUnknown), batch_count_(1), device_(device) {
305
+
306
+ cudaError_t result = this->malloc((void **)&pointer_, bytes(type, capacity));
307
+
308
+ if (result != cudaSuccess) {
309
+ type_ = library::NumericTypeID::kInvalid;
310
+ capacity_ = 0;
311
+ pointer_ = nullptr;
312
+ throw std::bad_alloc();
313
+ }
314
+ }
315
+
316
+ DeviceAllocation::DeviceAllocation(
317
+ library::NumericTypeID type,
318
+ library::LayoutTypeID layout_id,
319
+ std::vector<int> const &extent,
320
+ std::vector<int64_t> const &stride,
321
+ int batch_count,
322
+ int device
323
+ ):
324
+ type_(type), batch_stride_(size_t(0)), capacity_(size_t(0)),
325
+ pointer_(nullptr), batch_count_(1), device_(device) {
326
+
327
+ reset(type, layout_id, extent, stride, batch_count);
328
+ }
329
+
330
+ DeviceAllocation::~DeviceAllocation() {
331
+ if (pointer_) {
332
+ cudaFree(pointer_);
333
+ }
334
+ }
335
+
336
+ DeviceAllocation &DeviceAllocation::reset() {
337
+ if (pointer_) {
338
+ cudaFree(pointer_);
339
+ }
340
+
341
+ type_ = library::NumericTypeID::kInvalid;
342
+ batch_stride_ = 0;
343
+ capacity_ = 0;
344
+ pointer_ = nullptr;
345
+ layout_ = library::LayoutTypeID::kUnknown;
346
+ stride_.clear();
347
+ extent_.clear();
348
+ tensor_ref_buffer_.clear();
349
+ batch_count_ = 1;
350
+
351
+ return *this;
352
+ }
353
+
354
+ DeviceAllocation &DeviceAllocation::reset(library::NumericTypeID type, size_t capacity) {
355
+
356
+ reset();
357
+
358
+ type_ = type;
359
+ batch_stride_ = capacity;
360
+ capacity_ = capacity;
361
+
362
+ cudaError_t result = this->malloc((void **)&pointer_, bytes(type_, capacity_));
363
+ if (result != cudaSuccess) {
364
+ throw std::bad_alloc();
365
+ }
366
+
367
+ layout_ = library::LayoutTypeID::kUnknown;
368
+ stride_.clear();
369
+ extent_.clear();
370
+ batch_count_ = 1;
371
+
372
+ tensor_ref_buffer_.resize(sizeof(pointer_), 0);
373
+ std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
374
+
375
+ return *this;
376
+ }
377
+
378
+ /// Allocates memory for a given layout and tensor
379
+ DeviceAllocation &DeviceAllocation::reset(
380
+ library::NumericTypeID type,
381
+ library::LayoutTypeID layout_id,
382
+ std::vector<int> const &extent,
383
+ std::vector<int64_t> const &stride,
384
+ int batch_count) {
385
+
386
+ reset();
387
+
388
+ tensor_ref_buffer_.resize(sizeof(pointer_) + (sizeof(int64_t) * library::get_layout_stride_rank(layout_id)), 0);
389
+
390
+ type_ = type;
391
+
392
+ layout_ = layout_id;
393
+ stride_ = stride;
394
+ extent_ = extent;
395
+ batch_count_ = batch_count;
396
+
397
+ batch_stride_ = construct_layout(
398
+ tensor_ref_buffer_.data() + sizeof(pointer_),
399
+ layout_id,
400
+ extent,
401
+ stride_);
402
+
403
+ capacity_ = batch_stride_ * batch_count_;
404
+
405
+ cudaError_t result = this->malloc((void **)&pointer_, bytes(type, capacity_));
406
+ if (result != cudaSuccess) {
407
+ throw std::bad_alloc();
408
+ }
409
+
410
+ std::memcpy(tensor_ref_buffer_.data(), &pointer_, sizeof(pointer_));
411
+
412
+ return *this;
413
+ }
414
+
415
+ bool DeviceAllocation::good() const {
416
+ return (capacity_ && pointer_);
417
+ }
418
+
419
+ library::NumericTypeID DeviceAllocation::type() const {
420
+ return type_;
421
+ }
422
+
423
+ void *DeviceAllocation::data() const {
424
+ return pointer_;
425
+ }
426
+
427
+ void *DeviceAllocation::batch_data(int batch_idx) const {
428
+ return static_cast<char *>(data()) + batch_stride_bytes() * batch_idx;
429
+ }
430
+
431
+ library::LayoutTypeID DeviceAllocation::layout() const {
432
+ return layout_;
433
+ }
434
+
435
+ std::vector<int64_t> const & DeviceAllocation::stride() const {
436
+ return stride_;
437
+ }
438
+
439
+ /// Gets the extent vector
440
+ std::vector<int> const & DeviceAllocation::extent() const {
441
+ return extent_;
442
+ }
443
+
444
+ /// Gets the number of adjacent tensors in memory
445
+ int DeviceAllocation::batch_count() const {
446
+ return batch_count_;
447
+ }
448
+
449
+ /// Gets the stride (in units of elements) between items
450
+ int64_t DeviceAllocation::batch_stride() const {
451
+ return batch_stride_;
452
+ }
453
+
454
+ /// Gets the stride (in units of bytes) between items
455
+ int64_t DeviceAllocation::batch_stride_bytes() const {
456
+ return bytes(type_, batch_stride_);
457
+ }
458
+
459
+ size_t DeviceAllocation::capacity() const {
460
+ return capacity_;
461
+ }
462
+
463
+ size_t DeviceAllocation::bytes() const {
464
+ return bytes(type_, capacity_);
465
+ }
466
+
467
+ /// Copies from an equivalent-sized tensor in device memory
468
+ void DeviceAllocation::copy_from_device(void const *ptr) {
469
+ if (!bytes()) {
470
+ #ifndef NDEBUG
471
+ std::cout << "Skipping copy of size 0 allocation\n";
472
+ #endif
473
+ return;
474
+ }
475
+
476
+ cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyDeviceToDevice);
477
+ if (result != cudaSuccess) {
478
+ throw std::runtime_error("Failed device-to-device copy");
479
+ }
480
+ }
481
+
482
+ /// Copies from an equivalent-sized tensor in host memory
483
+ void DeviceAllocation::copy_from_host(void const *ptr) {
484
+ if (!bytes()) {
485
+ #ifndef NDEBUG
486
+ std::cout << "Skipping copy of size 0 allocation\n";
487
+ #endif
488
+ return;
489
+ }
490
+
491
+ cudaError_t result = cudaMemcpy(data(), ptr, bytes(), cudaMemcpyHostToDevice);
492
+ if (result != cudaSuccess) {
493
+ throw std::runtime_error("Failed host-to-device copy");
494
+ }
495
+ }
496
+
497
+ /// Copies to an equivalent-sized tensor in host memory
498
+ void DeviceAllocation::copy_to_host(void *ptr) {
499
+ if (!bytes()) {
500
+ #ifndef NDEBUG
501
+ std::cout << "Skipping copy of size 0 allocation\n";
502
+ #endif
503
+ return;
504
+ }
505
+
506
+ cudaError_t result = cudaMemcpy(ptr, data(), bytes(), cudaMemcpyDeviceToHost);
507
+ if (result != cudaSuccess) {
508
+ throw std::runtime_error("Failed device-to-host copy");
509
+ }
510
+ }
511
+
512
+ void DeviceAllocation::initialize_random_device(int seed, Distribution dist) {
513
+ if (!bytes()) {
514
+ #ifndef NDEBUG
515
+ std::cout << "Skipping initialization of size 0 allocation\n";
516
+ #endif
517
+ return;
518
+ }
519
+
520
+ if (!data()) {
521
+ throw std::runtime_error("Attempting to initialize invalid allocation.");
522
+ }
523
+
524
+ // Instantiate calls to CURAND here. This file takes a long time to compile for
525
+ // this reason.
526
+
527
+ switch (type_) {
528
+ case library::NumericTypeID::kF16:
529
+ cutlass::reference::device::BlockFillRandom<cutlass::half_t>(
530
+ reinterpret_cast<cutlass::half_t *>(pointer_),
531
+ capacity_,
532
+ seed,
533
+ dist
534
+ );
535
+ break;
536
+ case library::NumericTypeID::kBF16:
537
+ cutlass::reference::device::BlockFillRandom<cutlass::bfloat16_t>(
538
+ reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
539
+ capacity_,
540
+ seed,
541
+ dist
542
+ );
543
+ break;
544
+ case library::NumericTypeID::kTF32:
545
+ cutlass::reference::device::BlockFillRandom<cutlass::tfloat32_t>(
546
+ reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
547
+ capacity_,
548
+ seed,
549
+ dist
550
+ );
551
+ break;
552
+ case library::NumericTypeID::kF32:
553
+ cutlass::reference::device::BlockFillRandom<float>(
554
+ reinterpret_cast<float *>(pointer_),
555
+ capacity_,
556
+ seed,
557
+ dist
558
+ );
559
+ break;
560
+ case library::NumericTypeID::kCBF16:
561
+ cutlass::reference::device::BlockFillRandom<complex<bfloat16_t>>(
562
+ reinterpret_cast<complex<bfloat16_t> *>(pointer_),
563
+ capacity_,
564
+ seed,
565
+ dist
566
+ );
567
+ break;
568
+ case library::NumericTypeID::kCTF32:
569
+ cutlass::reference::device::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
570
+ reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
571
+ capacity_,
572
+ seed,
573
+ dist
574
+ );
575
+ break;
576
+ case library::NumericTypeID::kCF32:
577
+ cutlass::reference::device::BlockFillRandom<cutlass::complex<float>>(
578
+ reinterpret_cast<cutlass::complex<float> *>(pointer_),
579
+ capacity_,
580
+ seed,
581
+ dist
582
+ );
583
+ break;
584
+ case library::NumericTypeID::kFE4M3:
585
+ cutlass::reference::device::BlockFillRandom<cutlass::float_e4m3_t>(
586
+ reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
587
+ capacity_,
588
+ seed,
589
+ dist
590
+ );
591
+ break;
592
+ case library::NumericTypeID::kFE5M2:
593
+ cutlass::reference::device::BlockFillRandom<cutlass::float_e5m2_t>(
594
+ reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
595
+ capacity_,
596
+ seed,
597
+ dist
598
+ );
599
+ break;
600
+ case library::NumericTypeID::kF64:
601
+ cutlass::reference::device::BlockFillRandom<double>(
602
+ reinterpret_cast<double *>(pointer_),
603
+ capacity_,
604
+ seed,
605
+ dist
606
+ );
607
+ break;
608
+ case library::NumericTypeID::kCF64:
609
+ cutlass::reference::device::BlockFillRandom<complex<double>>(
610
+ reinterpret_cast<complex<double> *>(pointer_),
611
+ capacity_,
612
+ seed,
613
+ dist
614
+ );
615
+ break;
616
+ case library::NumericTypeID::kS2:
617
+ cutlass::reference::device::BlockFillRandom<int2b_t>(
618
+ reinterpret_cast<int2b_t *>(pointer_),
619
+ capacity_,
620
+ seed,
621
+ dist
622
+ );
623
+ break;
624
+ case library::NumericTypeID::kS4:
625
+ cutlass::reference::device::BlockFillRandom<int4b_t>(
626
+ reinterpret_cast<int4b_t *>(pointer_),
627
+ capacity_,
628
+ seed,
629
+ dist
630
+ );
631
+ break;
632
+ case library::NumericTypeID::kS8:
633
+ cutlass::reference::device::BlockFillRandom<int8_t>(
634
+ reinterpret_cast<int8_t *>(pointer_),
635
+ capacity_,
636
+ seed,
637
+ dist
638
+ );
639
+ break;
640
+ case library::NumericTypeID::kS16:
641
+ cutlass::reference::device::BlockFillRandom<int16_t>(
642
+ reinterpret_cast<int16_t *>(pointer_),
643
+ capacity_,
644
+ seed,
645
+ dist
646
+ );
647
+ break;
648
+ case library::NumericTypeID::kS32:
649
+ cutlass::reference::device::BlockFillRandom<int32_t>(
650
+ reinterpret_cast<int32_t *>(pointer_),
651
+ capacity_,
652
+ seed,
653
+ dist
654
+ );
655
+ break;
656
+ case library::NumericTypeID::kS64:
657
+ cutlass::reference::device::BlockFillRandom<int64_t>(
658
+ reinterpret_cast<int64_t *>(pointer_),
659
+ capacity_,
660
+ seed,
661
+ dist
662
+ );
663
+ break;
664
+ case library::NumericTypeID::kB1:
665
+ cutlass::reference::device::BlockFillRandom<uint1b_t>(
666
+ reinterpret_cast<uint1b_t *>(pointer_),
667
+ capacity_,
668
+ seed,
669
+ dist
670
+ );
671
+ break;
672
+ case library::NumericTypeID::kU2:
673
+ cutlass::reference::device::BlockFillRandom<uint2b_t>(
674
+ reinterpret_cast<uint2b_t *>(pointer_),
675
+ capacity_,
676
+ seed,
677
+ dist
678
+ );
679
+ break;
680
+ case library::NumericTypeID::kU4:
681
+ cutlass::reference::device::BlockFillRandom<uint4b_t>(
682
+ reinterpret_cast<uint4b_t *>(pointer_),
683
+ capacity_,
684
+ seed,
685
+ dist
686
+ );
687
+ break;
688
+ case library::NumericTypeID::kU8:
689
+ cutlass::reference::device::BlockFillRandom<uint8_t>(
690
+ reinterpret_cast<uint8_t *>(pointer_),
691
+ capacity_,
692
+ seed,
693
+ dist
694
+ );
695
+ break;
696
+ case library::NumericTypeID::kU16:
697
+ cutlass::reference::device::BlockFillRandom<uint16_t>(
698
+ reinterpret_cast<uint16_t *>(pointer_),
699
+ capacity_,
700
+ seed,
701
+ dist
702
+ );
703
+ break;
704
+ case library::NumericTypeID::kU32:
705
+ cutlass::reference::device::BlockFillRandom<uint32_t>(
706
+ reinterpret_cast<uint32_t *>(pointer_),
707
+ capacity_,
708
+ seed,
709
+ dist
710
+ );
711
+ break;
712
+ case library::NumericTypeID::kU64:
713
+ cutlass::reference::device::BlockFillRandom<uint64_t>(
714
+ reinterpret_cast<uint64_t *>(pointer_),
715
+ capacity_,
716
+ seed,
717
+ dist
718
+ );
719
+ break;
720
+ default: break;
721
+ }
722
+ }
723
+
724
+ void DeviceAllocation::initialize_random_host(int seed, Distribution dist) {
725
+ if (!bytes()) {
726
+ #ifndef NDEBUG
727
+ std::cout << "Skipping initialization of size 0 allocation\n";
728
+ #endif
729
+ return;
730
+ }
731
+
732
+ if (!data()) {
733
+ throw std::runtime_error("Attempting to initialize invalid allocation.");
734
+ }
735
+
736
+ std::vector<uint8_t> host_data(bytes());
737
+
738
+ switch (type_) {
739
+ case library::NumericTypeID::kFE4M3:
740
+ cutlass::reference::host::BlockFillRandom<cutlass::float_e4m3_t>(
741
+ reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
742
+ capacity_,
743
+ seed,
744
+ dist
745
+ );
746
+ break;
747
+ case library::NumericTypeID::kFE5M2:
748
+ cutlass::reference::host::BlockFillRandom<cutlass::float_e5m2_t>(
749
+ reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
750
+ capacity_,
751
+ seed,
752
+ dist
753
+ );
754
+ break;
755
+ case library::NumericTypeID::kF16:
756
+ cutlass::reference::host::BlockFillRandom<cutlass::half_t>(
757
+ reinterpret_cast<cutlass::half_t *>(host_data.data()),
758
+ capacity_,
759
+ seed,
760
+ dist
761
+ );
762
+ break;
763
+ case library::NumericTypeID::kBF16:
764
+ cutlass::reference::host::BlockFillRandom<cutlass::bfloat16_t>(
765
+ reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
766
+ capacity_,
767
+ seed,
768
+ dist
769
+ );
770
+ break;
771
+ case library::NumericTypeID::kTF32:
772
+ cutlass::reference::host::BlockFillRandom<cutlass::tfloat32_t>(
773
+ reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
774
+ capacity_,
775
+ seed,
776
+ dist
777
+ );
778
+ break;
779
+ case library::NumericTypeID::kF32:
780
+ cutlass::reference::host::BlockFillRandom<float>(
781
+ reinterpret_cast<float *>(host_data.data()),
782
+ capacity_,
783
+ seed,
784
+ dist
785
+ );
786
+ break;
787
+ case library::NumericTypeID::kCF16:
788
+ cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::half_t>>(
789
+ reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
790
+ capacity_,
791
+ seed,
792
+ dist
793
+ );
794
+ break;
795
+ case library::NumericTypeID::kCBF16:
796
+ cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::bfloat16_t>>(
797
+ reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
798
+ capacity_,
799
+ seed,
800
+ dist
801
+ );
802
+ break;
803
+ case library::NumericTypeID::kCTF32:
804
+ cutlass::reference::host::BlockFillRandom<cutlass::complex<cutlass::tfloat32_t>>(
805
+ reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
806
+ capacity_,
807
+ seed,
808
+ dist
809
+ );
810
+ break;
811
+ case library::NumericTypeID::kCF32:
812
+ cutlass::reference::host::BlockFillRandom<cutlass::complex<float>>(
813
+ reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
814
+ capacity_,
815
+ seed,
816
+ dist
817
+ );
818
+ break;
819
+ case library::NumericTypeID::kF64:
820
+ cutlass::reference::host::BlockFillRandom<double>(
821
+ reinterpret_cast<double *>(host_data.data()),
822
+ capacity_,
823
+ seed,
824
+ dist
825
+ );
826
+ break;
827
+ case library::NumericTypeID::kCF64:
828
+ cutlass::reference::host::BlockFillRandom<cutlass::complex<double>>(
829
+ reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
830
+ capacity_,
831
+ seed,
832
+ dist
833
+ );
834
+ break;
835
+ case library::NumericTypeID::kS2:
836
+ cutlass::reference::host::BlockFillRandom<int2b_t>(
837
+ reinterpret_cast<int2b_t *>(host_data.data()),
838
+ capacity_,
839
+ seed,
840
+ dist
841
+ );
842
+ break;
843
+ case library::NumericTypeID::kS4:
844
+ cutlass::reference::host::BlockFillRandom<int4b_t>(
845
+ reinterpret_cast<int4b_t *>(host_data.data()),
846
+ capacity_,
847
+ seed,
848
+ dist
849
+ );
850
+ break;
851
+ case library::NumericTypeID::kS8:
852
+ cutlass::reference::host::BlockFillRandom<int8_t>(
853
+ reinterpret_cast<int8_t *>(host_data.data()),
854
+ capacity_,
855
+ seed,
856
+ dist
857
+ );
858
+ break;
859
+ case library::NumericTypeID::kS16:
860
+ cutlass::reference::host::BlockFillRandom<int16_t>(
861
+ reinterpret_cast<int16_t *>(host_data.data()),
862
+ capacity_,
863
+ seed,
864
+ dist
865
+ );
866
+ break;
867
+ case library::NumericTypeID::kS32:
868
+ cutlass::reference::host::BlockFillRandom<int32_t>(
869
+ reinterpret_cast<int32_t *>(host_data.data()),
870
+ capacity_,
871
+ seed,
872
+ dist
873
+ );
874
+ break;
875
+ case library::NumericTypeID::kS64:
876
+ cutlass::reference::host::BlockFillRandom<int64_t>(
877
+ reinterpret_cast<int64_t *>(host_data.data()),
878
+ capacity_,
879
+ seed,
880
+ dist
881
+ );
882
+ break;
883
+ case library::NumericTypeID::kB1:
884
+ cutlass::reference::host::BlockFillRandom<uint1b_t>(
885
+ reinterpret_cast<uint1b_t *>(host_data.data()),
886
+ capacity_,
887
+ seed,
888
+ dist
889
+ );
890
+ break;
891
+ case library::NumericTypeID::kU2:
892
+ cutlass::reference::host::BlockFillRandom<uint2b_t>(
893
+ reinterpret_cast<uint2b_t *>(host_data.data()),
894
+ capacity_,
895
+ seed,
896
+ dist
897
+ );
898
+ break;
899
+ case library::NumericTypeID::kU4:
900
+ cutlass::reference::host::BlockFillRandom<uint4b_t>(
901
+ reinterpret_cast<uint4b_t *>(host_data.data()),
902
+ capacity_,
903
+ seed,
904
+ dist
905
+ );
906
+ break;
907
+ case library::NumericTypeID::kU8:
908
+ cutlass::reference::host::BlockFillRandom<uint8_t>(
909
+ reinterpret_cast<uint8_t *>(host_data.data()),
910
+ capacity_,
911
+ seed,
912
+ dist
913
+ );
914
+ break;
915
+ case library::NumericTypeID::kU16:
916
+ cutlass::reference::host::BlockFillRandom<uint16_t>(
917
+ reinterpret_cast<uint16_t *>(host_data.data()),
918
+ capacity_,
919
+ seed,
920
+ dist
921
+ );
922
+ break;
923
+ case library::NumericTypeID::kU32:
924
+ cutlass::reference::host::BlockFillRandom<uint32_t>(
925
+ reinterpret_cast<uint32_t *>(host_data.data()),
926
+ capacity_,
927
+ seed,
928
+ dist
929
+ );
930
+ break;
931
+ case library::NumericTypeID::kU64:
932
+ cutlass::reference::host::BlockFillRandom<uint64_t>(
933
+ reinterpret_cast<uint64_t *>(host_data.data()),
934
+ capacity_,
935
+ seed,
936
+ dist
937
+ );
938
+ break;
939
+ default: break;
940
+ }
941
+
942
+ copy_from_host(host_data.data());
943
+ }
944
+
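A quick aside on the pattern used by these *_host initializers: the data is first generated into a plain host buffer sized by bytes(), then pushed to the device in a single transfer via copy_from_host(). A self-contained sketch of that staging pattern, using raw CUDA calls rather than this class's helpers:

#include <cstddef>
#include <cstdint>
#include <vector>
#include <cuda_runtime.h>

// Fill a host-side staging buffer, then copy it to device memory in one memcpy.
cudaError_t staged_host_init_sketch(void *device_ptr, size_t bytes_to_copy) {
  std::vector<uint8_t> host_data(bytes_to_copy);
  // ... populate host_data with the desired pattern here ...
  return cudaMemcpy(device_ptr, host_data.data(), bytes_to_copy, cudaMemcpyHostToDevice);
}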
945
+ void DeviceAllocation::initialize_sequential_device(Distribution dist) {
946
+ if (!bytes()) {
947
+ #ifndef NDEBUG
948
+ std::cout << "Skipping initialization of size 0 allocation\n";
949
+ #endif
950
+ return;
951
+ }
952
+
953
+ if (!data()) {
954
+ throw std::runtime_error("Attempting to initialize invalid allocation.");
955
+ }
956
+
957
+ switch (type_) {
958
+ case library::NumericTypeID::kFE4M3:
959
+ cutlass::reference::device::BlockFillSequential<cutlass::float_e4m3_t>(
960
+ reinterpret_cast<cutlass::float_e4m3_t *>(pointer_),
961
+ capacity_,
962
+ static_cast<cutlass::float_e4m3_t>(dist.sequential.delta),
963
+ static_cast<cutlass::float_e4m3_t>(dist.sequential.start)
964
+ );
965
+ break;
966
+ case library::NumericTypeID::kFE5M2:
967
+ cutlass::reference::device::BlockFillSequential<cutlass::float_e5m2_t>(
968
+ reinterpret_cast<cutlass::float_e5m2_t *>(pointer_),
969
+ capacity_,
970
+ static_cast<cutlass::float_e5m2_t>(dist.sequential.delta),
971
+ static_cast<cutlass::float_e5m2_t>(dist.sequential.start)
972
+ );
973
+ break;
974
+ case library::NumericTypeID::kF16:
975
+ cutlass::reference::device::BlockFillSequential<cutlass::half_t>(
976
+ reinterpret_cast<cutlass::half_t *>(pointer_),
977
+ capacity_,
978
+ static_cast<cutlass::half_t>(dist.sequential.delta),
979
+ static_cast<cutlass::half_t>(dist.sequential.start)
980
+ );
981
+ break;
982
+ case library::NumericTypeID::kBF16:
983
+ cutlass::reference::device::BlockFillSequential<cutlass::bfloat16_t>(
984
+ reinterpret_cast<cutlass::bfloat16_t *>(pointer_),
985
+ capacity_,
986
+ static_cast<cutlass::bfloat16_t>(dist.sequential.delta),
987
+ static_cast<cutlass::bfloat16_t>(dist.sequential.start)
988
+ );
989
+ break;
990
+ case library::NumericTypeID::kTF32:
991
+ cutlass::reference::device::BlockFillSequential<cutlass::tfloat32_t>(
992
+ reinterpret_cast<cutlass::tfloat32_t *>(pointer_),
993
+ capacity_,
994
+ static_cast<cutlass::tfloat32_t>(dist.sequential.delta),
995
+ static_cast<cutlass::tfloat32_t>(dist.sequential.start)
996
+ );
997
+ break;
998
+ case library::NumericTypeID::kF32:
999
+ cutlass::reference::device::BlockFillSequential<float>(
1000
+ reinterpret_cast<float *>(pointer_),
1001
+ capacity_,
1002
+ static_cast<float>(dist.sequential.delta),
1003
+ static_cast<float>(dist.sequential.start)
1004
+ );
1005
+ break;
1006
+ case library::NumericTypeID::kCF16:
1007
+ cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::half_t>>(
1008
+ reinterpret_cast<cutlass::complex<cutlass::half_t> *>(pointer_),
1009
+ capacity_,
1010
+ cutlass::complex<cutlass::half_t>(
1011
+ static_cast<cutlass::half_t>(dist.sequential.delta)),
1012
+ cutlass::complex<cutlass::half_t>(
1013
+ static_cast<cutlass::half_t>(dist.sequential.start))
1014
+ );
1015
+ break;
1016
+ case library::NumericTypeID::kCBF16:
1017
+ cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::bfloat16_t>>(
1018
+ reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(pointer_),
1019
+ capacity_,
1020
+ cutlass::complex<cutlass::bfloat16_t>(
1021
+ static_cast<cutlass::bfloat16_t>(dist.sequential.delta)),
1022
+ cutlass::complex<cutlass::bfloat16_t>(
1023
+ static_cast<cutlass::bfloat16_t>(dist.sequential.start))
1024
+ );
1025
+ break;
1026
+ case library::NumericTypeID::kCTF32:
1027
+ cutlass::reference::device::BlockFillSequential<cutlass::complex<cutlass::tfloat32_t>>(
1028
+ reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(pointer_),
1029
+ capacity_,
1030
+ cutlass::complex<cutlass::tfloat32_t>(
1031
+ static_cast<cutlass::tfloat32_t>(dist.sequential.delta)),
1032
+ cutlass::complex<cutlass::tfloat32_t>(
1033
+ static_cast<cutlass::tfloat32_t>(dist.sequential.start))
1034
+ );
1035
+ break;
1036
+ case library::NumericTypeID::kCF32:
1037
+ cutlass::reference::device::BlockFillSequential<cutlass::complex<float>>(
1038
+ reinterpret_cast<cutlass::complex<float> *>(pointer_),
1039
+ capacity_,
1040
+ cutlass::complex<float>(
1041
+ static_cast<float>(dist.sequential.delta)),
1042
+ cutlass::complex<float>(
1043
+ static_cast<float>(dist.sequential.start))
1044
+ );
1045
+ break;
1046
+ case library::NumericTypeID::kF64:
1047
+ cutlass::reference::device::BlockFillSequential<double>(
1048
+ reinterpret_cast<double *>(pointer_),
1049
+ capacity_,
1050
+ static_cast<double>(dist.sequential.delta),
1051
+ static_cast<double>(dist.sequential.start)
1052
+ );
1053
+ break;
1054
+ case library::NumericTypeID::kCF64:
1055
+ cutlass::reference::device::BlockFillSequential<cutlass::complex<double>>(
1056
+ reinterpret_cast<cutlass::complex<double> *>(pointer_),
1057
+ capacity_,
1058
+ cutlass::complex<double>(
1059
+ static_cast<double>(dist.sequential.delta)),
1060
+ cutlass::complex<double>(
1061
+ static_cast<double>(dist.sequential.start))
1062
+ );
1063
+ break;
1064
+ case library::NumericTypeID::kS2:
1065
+ cutlass::reference::device::BlockFillSequential<int2b_t>(
1066
+ reinterpret_cast<int2b_t *>(pointer_),
1067
+ capacity_,
1068
+ static_cast<int2b_t>(dist.sequential.delta),
1069
+ static_cast<int2b_t>(dist.sequential.start)
1070
+ );
1071
+ break;
1072
+ case library::NumericTypeID::kS4:
1073
+ cutlass::reference::device::BlockFillSequential<int4b_t>(
1074
+ reinterpret_cast<int4b_t *>(pointer_),
1075
+ capacity_,
1076
+ static_cast<int4b_t>(dist.sequential.delta),
1077
+ static_cast<int4b_t>(dist.sequential.start)
1078
+ );
1079
+ break;
1080
+ case library::NumericTypeID::kS8:
1081
+ cutlass::reference::device::BlockFillSequential<int8_t>(
1082
+ reinterpret_cast<int8_t *>(pointer_),
1083
+ capacity_,
1084
+ static_cast<int8_t>(dist.sequential.delta),
1085
+ static_cast<int8_t>(dist.sequential.start)
1086
+ );
1087
+ break;
1088
+ case library::NumericTypeID::kS16:
1089
+ cutlass::reference::device::BlockFillSequential<int16_t>(
1090
+ reinterpret_cast<int16_t *>(pointer_),
1091
+ capacity_,
1092
+ static_cast<int16_t>(dist.sequential.delta),
1093
+ static_cast<int16_t>(dist.sequential.start)
1094
+ );
1095
+ break;
1096
+ case library::NumericTypeID::kS32:
1097
+ cutlass::reference::device::BlockFillSequential<int32_t>(
1098
+ reinterpret_cast<int32_t *>(pointer_),
1099
+ capacity_,
1100
+ static_cast<int32_t>(dist.sequential.delta),
1101
+ static_cast<int32_t>(dist.sequential.start)
1102
+ );
1103
+ break;
1104
+ case library::NumericTypeID::kS64:
1105
+ cutlass::reference::device::BlockFillSequential<int64_t>(
1106
+ reinterpret_cast<int64_t *>(pointer_),
1107
+ capacity_,
1108
+ static_cast<int64_t>(dist.sequential.delta),
1109
+ static_cast<int64_t>(dist.sequential.start)
1110
+ );
1111
+ break;
1112
+ case library::NumericTypeID::kB1:
1113
+ cutlass::reference::device::BlockFillSequential<uint1b_t>(
1114
+ reinterpret_cast<uint1b_t *>(pointer_),
1115
+ capacity_,
1116
+ static_cast<uint1b_t>(dist.sequential.delta),
1117
+ static_cast<uint1b_t>(dist.sequential.start)
1118
+ );
1119
+ break;
1120
+ case library::NumericTypeID::kU2:
1121
+ cutlass::reference::device::BlockFillSequential<uint2b_t>(
1122
+ reinterpret_cast<uint2b_t *>(pointer_),
1123
+ capacity_,
1124
+ static_cast<uint2b_t>(dist.sequential.delta),
1125
+ static_cast<uint2b_t>(dist.sequential.start)
1126
+ );
1127
+ break;
1128
+ case library::NumericTypeID::kU4:
1129
+ cutlass::reference::device::BlockFillSequential<uint4b_t>(
1130
+ reinterpret_cast<uint4b_t *>(pointer_),
1131
+ capacity_,
1132
+ static_cast<uint4b_t>(dist.sequential.delta),
1133
+ static_cast<uint4b_t>(dist.sequential.start)
1134
+ );
1135
+ break;
1136
+ case library::NumericTypeID::kU8:
1137
+ cutlass::reference::device::BlockFillSequential<uint8_t>(
1138
+ reinterpret_cast<uint8_t *>(pointer_),
1139
+ capacity_,
1140
+ static_cast<uint8_t>(dist.sequential.delta),
1141
+ static_cast<uint8_t>(dist.sequential.start)
1142
+ );
1143
+ break;
1144
+ case library::NumericTypeID::kU16:
1145
+ cutlass::reference::device::BlockFillSequential<uint16_t>(
1146
+ reinterpret_cast<uint16_t *>(pointer_),
1147
+ capacity_,
1148
+ static_cast<uint16_t>(dist.sequential.delta),
1149
+ static_cast<uint16_t>(dist.sequential.start)
1150
+ );
1151
+ break;
1152
+ case library::NumericTypeID::kU32:
1153
+ cutlass::reference::device::BlockFillSequential<uint32_t>(
1154
+ reinterpret_cast<uint32_t *>(pointer_),
1155
+ capacity_,
1156
+ static_cast<uint32_t>(dist.sequential.delta),
1157
+ static_cast<uint32_t>(dist.sequential.start)
1158
+ );
1159
+ break;
1160
+ case library::NumericTypeID::kU64:
1161
+ cutlass::reference::device::BlockFillSequential<uint64_t>(
1162
+ reinterpret_cast<uint64_t *>(pointer_),
1163
+ capacity_,
1164
+ static_cast<uint64_t>(dist.sequential.delta),
1165
+ static_cast<uint64_t>(dist.sequential.start)
1166
+ );
1167
+ break;
1168
+ default: break;
1169
+ }
1170
+
1171
+ }
1172
+
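The sequential initializers write an arithmetic progression: element i receives start + i * delta across all capacity_ elements. A minimal host-side sketch of that contract (a standalone illustration of the assumed behavior, not code from this file):

#include <cstddef>

// buf[i] = start + i * delta, mirroring the sequential-fill contract assumed above.
template <typename T>
void fill_sequential_sketch(T *buf, size_t capacity, T delta, T start) {
  T value = start;
  for (size_t i = 0; i < capacity; ++i) {
    buf[i] = value;
    value = static_cast<T>(value + delta);
  }
}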
1173
+ void DeviceAllocation::initialize_sequential_host(Distribution dist) {
1174
+ if (!bytes()) {
1175
+ #ifndef NDEBUG
1176
+ std::cout << "Skipping initialization of size 0 allocation\n";
1177
+ #endif
1178
+ return;
1179
+ }
1180
+
1181
+ if (!data()) {
1182
+ throw std::runtime_error("Attempting to initialize invalid allocation.");
1183
+ }
1184
+
1185
+ std::vector<uint8_t> host_data(bytes());
1186
+
1187
+ switch (type_) {
1188
+ case library::NumericTypeID::kFE4M3:
1189
+ cutlass::reference::host::BlockFillSequential<cutlass::float_e4m3_t>(
1190
+ reinterpret_cast<cutlass::float_e4m3_t *>(host_data.data()),
1191
+ capacity_,
1192
+ static_cast<cutlass::float_e4m3_t>(dist.sequential.delta),
1193
+ static_cast<cutlass::float_e4m3_t>(dist.sequential.start)
1194
+ );
1195
+ break;
1196
+ case library::NumericTypeID::kFE5M2:
1197
+ cutlass::reference::host::BlockFillSequential<cutlass::float_e5m2_t>(
1198
+ reinterpret_cast<cutlass::float_e5m2_t *>(host_data.data()),
1199
+ capacity_,
1200
+ static_cast<cutlass::float_e5m2_t>(dist.sequential.delta),
1201
+ static_cast<cutlass::float_e5m2_t>(dist.sequential.start)
1202
+ );
1203
+ break;
1204
+ case library::NumericTypeID::kF16:
1205
+ cutlass::reference::host::BlockFillSequential<cutlass::half_t>(
1206
+ reinterpret_cast<cutlass::half_t *>(host_data.data()),
1207
+ capacity_,
1208
+ static_cast<cutlass::half_t>(dist.sequential.delta),
1209
+ static_cast<cutlass::half_t>(dist.sequential.start)
1210
+ );
1211
+ break;
1212
+ case library::NumericTypeID::kBF16:
1213
+ cutlass::reference::host::BlockFillSequential<cutlass::bfloat16_t>(
1214
+ reinterpret_cast<cutlass::bfloat16_t *>(host_data.data()),
1215
+ capacity_,
1216
+ static_cast<cutlass::bfloat16_t>(dist.sequential.delta),
1217
+ static_cast<cutlass::bfloat16_t>(dist.sequential.start)
1218
+ );
1219
+ break;
1220
+ case library::NumericTypeID::kTF32:
1221
+ cutlass::reference::host::BlockFillSequential<cutlass::tfloat32_t>(
1222
+ reinterpret_cast<cutlass::tfloat32_t *>(host_data.data()),
1223
+ capacity_,
1224
+ static_cast<cutlass::tfloat32_t>(dist.sequential.delta),
1225
+ static_cast<cutlass::tfloat32_t>(dist.sequential.start)
1226
+ );
1227
+ break;
1228
+ case library::NumericTypeID::kF32:
1229
+ cutlass::reference::host::BlockFillSequential<float>(
1230
+ reinterpret_cast<float *>(host_data.data()),
1231
+ capacity_,
1232
+ static_cast<float>(dist.sequential.delta),
1233
+ static_cast<float>(dist.sequential.start)
1234
+ );
1235
+ break;
1236
+ case library::NumericTypeID::kCF16:
1237
+ cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::half_t>>(
1238
+ reinterpret_cast<cutlass::complex<cutlass::half_t> *>(host_data.data()),
1239
+ capacity_,
1240
+ cutlass::complex<cutlass::half_t>(
1241
+ static_cast<cutlass::half_t>(dist.sequential.delta)),
1242
+ cutlass::complex<cutlass::half_t>(
1243
+ static_cast<cutlass::half_t>(dist.sequential.start))
1244
+ );
1245
+ break;
1246
+ case library::NumericTypeID::kCBF16:
1247
+ cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::bfloat16_t>>(
1248
+ reinterpret_cast<cutlass::complex<cutlass::bfloat16_t> *>(host_data.data()),
1249
+ capacity_,
1250
+ cutlass::complex<cutlass::bfloat16_t>(
1251
+ static_cast<cutlass::bfloat16_t>(dist.sequential.delta)),
1252
+ cutlass::complex<cutlass::bfloat16_t>(
1253
+ static_cast<cutlass::bfloat16_t>(dist.sequential.start))
1254
+ );
1255
+ break;
1256
+ case library::NumericTypeID::kCTF32:
1257
+ cutlass::reference::host::BlockFillSequential<cutlass::complex<cutlass::tfloat32_t>>(
1258
+ reinterpret_cast<cutlass::complex<cutlass::tfloat32_t> *>(host_data.data()),
1259
+ capacity_,
1260
+ cutlass::complex<cutlass::tfloat32_t>(
1261
+ static_cast<cutlass::tfloat32_t>(dist.sequential.delta)),
1262
+ cutlass::complex<cutlass::tfloat32_t>(
1263
+ static_cast<cutlass::tfloat32_t>(dist.sequential.start))
1264
+ );
1265
+ break;
1266
+ case library::NumericTypeID::kCF32:
1267
+ cutlass::reference::host::BlockFillSequential<cutlass::complex<float>>(
1268
+ reinterpret_cast<cutlass::complex<float> *>(host_data.data()),
1269
+ capacity_,
1270
+ cutlass::complex<float>(
1271
+ static_cast<float>(dist.sequential.delta)),
1272
+ cutlass::complex<float>(
1273
+ static_cast<float>(dist.sequential.start))
1274
+ );
1275
+ break;
1276
+ case library::NumericTypeID::kF64:
1277
+ cutlass::reference::host::BlockFillSequential<double>(
1278
+ reinterpret_cast<double *>(host_data.data()),
1279
+ capacity_,
1280
+ static_cast<double>(dist.sequential.delta),
1281
+ static_cast<double>(dist.sequential.start)
1282
+ );
1283
+ break;
1284
+ case library::NumericTypeID::kCF64:
1285
+ cutlass::reference::host::BlockFillSequential<cutlass::complex<double>>(
1286
+ reinterpret_cast<cutlass::complex<double> *>(host_data.data()),
1287
+ capacity_,
1288
+ cutlass::complex<double>(
1289
+ static_cast<double>(dist.sequential.delta)),
1290
+ cutlass::complex<double>(
1291
+ static_cast<double>(dist.sequential.start))
1292
+ );
1293
+ break;
1294
+ case library::NumericTypeID::kS2:
1295
+ cutlass::reference::host::BlockFillSequential<int2b_t>(
1296
+ reinterpret_cast<int2b_t *>(host_data.data()),
1297
+ capacity_,
1298
+ static_cast<int2b_t>(dist.sequential.delta),
1299
+ static_cast<int2b_t>(dist.sequential.start)
1300
+ );
1301
+ break;
1302
+ case library::NumericTypeID::kS4:
1303
+ cutlass::reference::host::BlockFillSequential<int4b_t>(
1304
+ reinterpret_cast<int4b_t *>(host_data.data()),
1305
+ capacity_,
1306
+ static_cast<int4b_t>(dist.sequential.delta),
1307
+ static_cast<int4b_t>(dist.sequential.start)
1308
+ );
1309
+ break;
1310
+ case library::NumericTypeID::kS8:
1311
+ cutlass::reference::host::BlockFillSequential<int8_t>(
1312
+ reinterpret_cast<int8_t *>(host_data.data()),
1313
+ capacity_,
1314
+ static_cast<int8_t>(dist.sequential.delta),
1315
+ static_cast<int8_t>(dist.sequential.start)
1316
+ );
1317
+ break;
1318
+ case library::NumericTypeID::kS16:
1319
+ cutlass::reference::host::BlockFillSequential<int16_t>(
1320
+ reinterpret_cast<int16_t *>(host_data.data()),
1321
+ capacity_,
1322
+ static_cast<int16_t>(dist.sequential.delta),
1323
+ static_cast<int16_t>(dist.sequential.start)
1324
+ );
1325
+ break;
1326
+ case library::NumericTypeID::kS32:
1327
+ cutlass::reference::host::BlockFillSequential<int32_t>(
1328
+ reinterpret_cast<int32_t *>(host_data.data()),
1329
+ capacity_,
1330
+ static_cast<int32_t>(dist.sequential.delta),
1331
+ static_cast<int32_t>(dist.sequential.start)
1332
+ );
1333
+ break;
1334
+ case library::NumericTypeID::kS64:
1335
+ cutlass::reference::host::BlockFillSequential<int64_t>(
1336
+ reinterpret_cast<int64_t *>(host_data.data()),
1337
+ capacity_,
1338
+ static_cast<int64_t>(dist.sequential.delta),
1339
+ static_cast<int64_t>(dist.sequential.start)
1340
+ );
1341
+ break;
1342
+ case library::NumericTypeID::kB1:
1343
+ cutlass::reference::host::BlockFillSequential<uint1b_t>(
1344
+ reinterpret_cast<uint1b_t *>(host_data.data()),
1345
+ capacity_,
1346
+ static_cast<uint1b_t>(dist.sequential.delta),
1347
+ static_cast<uint1b_t>(dist.sequential.start)
1348
+ );
1349
+ break;
1350
+ case library::NumericTypeID::kU2:
1351
+ cutlass::reference::host::BlockFillSequential<uint2b_t>(
1352
+ reinterpret_cast<uint2b_t *>(host_data.data()),
1353
+ capacity_,
1354
+ static_cast<uint2b_t>(dist.sequential.delta),
1355
+ static_cast<uint2b_t>(dist.sequential.start)
1356
+ );
1357
+ break;
1358
+ case library::NumericTypeID::kU4:
1359
+ cutlass::reference::host::BlockFillSequential<uint4b_t>(
1360
+ reinterpret_cast<uint4b_t *>(host_data.data()),
1361
+ capacity_,
1362
+ static_cast<uint4b_t>(dist.sequential.delta),
1363
+ static_cast<uint4b_t>(dist.sequential.start)
1364
+ );
1365
+ break;
1366
+ case library::NumericTypeID::kU8:
1367
+ cutlass::reference::host::BlockFillSequential<uint8_t>(
1368
+ reinterpret_cast<uint8_t *>(host_data.data()),
1369
+ capacity_,
1370
+ static_cast<uint8_t>(dist.sequential.delta),
1371
+ static_cast<uint8_t>(dist.sequential.start)
1372
+ );
1373
+ break;
1374
+ case library::NumericTypeID::kU16:
1375
+ cutlass::reference::host::BlockFillSequential<uint16_t>(
1376
+ reinterpret_cast<uint16_t *>(host_data.data()),
1377
+ capacity_,
1378
+ static_cast<uint16_t>(dist.sequential.delta),
1379
+ static_cast<uint16_t>(dist.sequential.start)
1380
+ );
1381
+ break;
1382
+ case library::NumericTypeID::kU32:
1383
+ cutlass::reference::host::BlockFillSequential<uint32_t>(
1384
+ reinterpret_cast<uint32_t *>(host_data.data()),
1385
+ capacity_,
1386
+ static_cast<uint32_t>(dist.sequential.delta),
1387
+ static_cast<uint32_t>(dist.sequential.start)
1388
+ );
1389
+ break;
1390
+ case library::NumericTypeID::kU64:
1391
+ cutlass::reference::host::BlockFillSequential<uint64_t>(
1392
+ reinterpret_cast<uint64_t *>(host_data.data()),
1393
+ capacity_,
1394
+ static_cast<uint64_t>(dist.sequential.delta),
1395
+ static_cast<uint64_t>(dist.sequential.start)
1396
+ );
1397
+ break;
1398
+ default: break;
1399
+ }
1400
+
1401
+ copy_from_host(host_data.data());
1402
+ }
1403
+
1404
+ void DeviceAllocation::initialize_random_sparsemeta_device(int seed, int MetaSizeInBits) {
1405
+ if (!bytes()) {
1406
+ #ifndef NDEBUG
1407
+ std::cout << "Skipping initialization of size 0 allocation\n";
1408
+ #endif
1409
+ return;
1410
+ }
1411
+
1412
+ if (!data()) {
1413
+ throw std::runtime_error("Attempting to initialize invalid allocation.");
1414
+ }
1415
+
1416
+ // Instantiate calls to CURAND here. This file takes a long time to compile for
1417
+ // this reason.
1418
+
1419
+ switch (type_) {
1420
+ case library::NumericTypeID::kU16:
1421
+ cutlass::reference::device::BlockFillRandomSparseMeta<uint16_t>(
1422
+ reinterpret_cast<uint16_t *>(pointer_),
1423
+ capacity_,
1424
+ seed,
1425
+ MetaSizeInBits
1426
+ );
1427
+ break;
1428
+ case library::NumericTypeID::kU32:
1429
+ cutlass::reference::device::BlockFillRandomSparseMeta<uint32_t>(
1430
+ reinterpret_cast<uint32_t *>(pointer_),
1431
+ capacity_,
1432
+ seed,
1433
+ MetaSizeInBits
1434
+ );
1435
+ break;
1436
+ default:
1437
+ break;
1438
+ }
1439
+ }
1440
+
1441
+ void DeviceAllocation::initialize_random_sparsemeta_host(int seed, int MetaSizeInBits) {
1442
+ if (!bytes()) {
1443
+ #ifndef NDEBUG
1444
+ std::cout << "Skipping initialization of size 0 allocation\n";
1445
+ #endif
1446
+ return;
1447
+ }
1448
+
1449
+ if (!data()) {
1450
+ throw std::runtime_error("Attempting to initialize invalid allocation.");
1451
+ }
1452
+
1453
+ std::vector<uint8_t> host_data(bytes());
1454
+
1455
+ switch (type_) {
1456
+ case library::NumericTypeID::kS16:
1457
+ cutlass::reference::host::BlockFillRandomSparseMeta<uint16_t>(
1458
+ reinterpret_cast<uint16_t *>(host_data.data()),
1459
+ capacity_,
1460
+ seed,
1461
+ MetaSizeInBits
1462
+ );
1463
+ break;
1464
+ case library::NumericTypeID::kS32:
1465
+ cutlass::reference::host::BlockFillRandomSparseMeta<uint32_t>(
1466
+ reinterpret_cast<uint32_t *>(host_data.data()),
1467
+ capacity_,
1468
+ seed,
1469
+ MetaSizeInBits
1470
+ );
1471
+ break;
1472
+ default:
1473
+ break;
1474
+ }
1475
+
1476
+ copy_from_host(host_data.data());
1477
+ }
1478
+
1479
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1480
+
1481
+ /// Returns true if two blocks have exactly the same value
1482
+ bool DeviceAllocation::block_compare_equal(
1483
+ library::NumericTypeID numeric_type,
1484
+ void const *ptr_A,
1485
+ void const *ptr_B,
1486
+ size_t capacity) {
1487
+
1488
+ switch (numeric_type) {
1489
+ case library::NumericTypeID::kFE4M3:
1490
+ return reference::device::BlockCompareEqual<float_e4m3_t>(
1491
+ reinterpret_cast<float_e4m3_t const *>(ptr_A),
1492
+ reinterpret_cast<float_e4m3_t const *>(ptr_B),
1493
+ capacity);
1494
+
1495
+ case library::NumericTypeID::kFE5M2:
1496
+ return reference::device::BlockCompareEqual<float_e5m2_t>(
1497
+ reinterpret_cast<float_e5m2_t const *>(ptr_A),
1498
+ reinterpret_cast<float_e5m2_t const *>(ptr_B),
1499
+ capacity);
1500
+ case library::NumericTypeID::kF16:
1501
+ return reference::device::BlockCompareEqual<half_t>(
1502
+ reinterpret_cast<half_t const *>(ptr_A),
1503
+ reinterpret_cast<half_t const *>(ptr_B),
1504
+ capacity);
1505
+
1506
+ case library::NumericTypeID::kBF16:
1507
+ return reference::device::BlockCompareEqual<bfloat16_t>(
1508
+ reinterpret_cast<bfloat16_t const *>(ptr_A),
1509
+ reinterpret_cast<bfloat16_t const *>(ptr_B),
1510
+ capacity);
1511
+
1512
+ case library::NumericTypeID::kTF32:
1513
+ return reference::device::BlockCompareEqual<tfloat32_t>(
1514
+ reinterpret_cast<tfloat32_t const *>(ptr_A),
1515
+ reinterpret_cast<tfloat32_t const *>(ptr_B),
1516
+ capacity);
1517
+
1518
+ case library::NumericTypeID::kF32:
1519
+ return reference::device::BlockCompareEqual<float>(
1520
+ reinterpret_cast<float const *>(ptr_A),
1521
+ reinterpret_cast<float const *>(ptr_B),
1522
+ capacity);
1523
+
1524
+ case library::NumericTypeID::kCF32:
1525
+ return reference::device::BlockCompareEqual<cutlass::complex<float> >(
1526
+ reinterpret_cast<complex<float> const *>(ptr_A),
1527
+ reinterpret_cast<complex<float> const *>(ptr_B),
1528
+ capacity);
1529
+
1530
+ case library::NumericTypeID::kCF16:
1531
+ return reference::device::BlockCompareEqual<complex<half_t>>(
1532
+ reinterpret_cast<complex<half_t> const *>(ptr_A),
1533
+ reinterpret_cast<complex<half_t> const *>(ptr_B),
1534
+ capacity);
1535
+
1536
+ case library::NumericTypeID::kCBF16:
1537
+ return reference::device::BlockCompareEqual<complex<bfloat16_t>>(
1538
+ reinterpret_cast<complex<bfloat16_t> const *>(ptr_A),
1539
+ reinterpret_cast<complex<bfloat16_t> const *>(ptr_B),
1540
+ capacity);
1541
+
1542
+ case library::NumericTypeID::kCTF32:
1543
+ return reference::device::BlockCompareEqual<complex<tfloat32_t>>(
1544
+ reinterpret_cast<complex<tfloat32_t> const *>(ptr_A),
1545
+ reinterpret_cast<complex<tfloat32_t> const *>(ptr_B),
1546
+ capacity);
1547
+
1548
+ case library::NumericTypeID::kF64:
1549
+ return reference::device::BlockCompareEqual<double>(
1550
+ reinterpret_cast<double const *>(ptr_A),
1551
+ reinterpret_cast<double const *>(ptr_B),
1552
+ capacity);
1553
+
1554
+ case library::NumericTypeID::kCF64:
1555
+ return reference::device::BlockCompareEqual<complex<double>>(
1556
+ reinterpret_cast<complex<double> const *>(ptr_A),
1557
+ reinterpret_cast<complex<double> const *>(ptr_B),
1558
+ capacity);
1559
+
1560
+ case library::NumericTypeID::kS2:
1561
+ return reference::device::BlockCompareEqual<int2b_t>(
1562
+ reinterpret_cast<int2b_t const *>(ptr_A),
1563
+ reinterpret_cast<int2b_t const *>(ptr_B),
1564
+ capacity);
1565
+
1566
+ case library::NumericTypeID::kS4:
1567
+ return reference::device::BlockCompareEqual<int4b_t>(
1568
+ reinterpret_cast<int4b_t const *>(ptr_A),
1569
+ reinterpret_cast<int4b_t const *>(ptr_B),
1570
+ capacity);
1571
+
1572
+ case library::NumericTypeID::kS8:
1573
+ return reference::device::BlockCompareEqual<int8_t>(
1574
+ reinterpret_cast<int8_t const *>(ptr_A),
1575
+ reinterpret_cast<int8_t const *>(ptr_B),
1576
+ capacity);
1577
+
1578
+ case library::NumericTypeID::kS16:
1579
+ return reference::device::BlockCompareEqual<int16_t>(
1580
+ reinterpret_cast<int16_t const *>(ptr_A),
1581
+ reinterpret_cast<int16_t const *>(ptr_B),
1582
+ capacity);
1583
+
1584
+ case library::NumericTypeID::kS32:
1585
+ return reference::device::BlockCompareEqual<int32_t>(
1586
+ reinterpret_cast<int32_t const *>(ptr_A),
1587
+ reinterpret_cast<int32_t const *>(ptr_B),
1588
+ capacity);
1589
+
1590
+ case library::NumericTypeID::kS64:
1591
+ return reference::device::BlockCompareEqual<int64_t>(
1592
+ reinterpret_cast<int64_t const *>(ptr_A),
1593
+ reinterpret_cast<int64_t const *>(ptr_B),
1594
+ capacity);
1595
+
1596
+ case library::NumericTypeID::kB1:
1597
+ return reference::device::BlockCompareEqual<uint1b_t>(
1598
+ reinterpret_cast<uint1b_t const *>(ptr_A),
1599
+ reinterpret_cast<uint1b_t const *>(ptr_B),
1600
+ capacity);
1601
+
1602
+ case library::NumericTypeID::kU2:
1603
+ return reference::device::BlockCompareEqual<uint2b_t>(
1604
+ reinterpret_cast<uint2b_t const *>(ptr_A),
1605
+ reinterpret_cast<uint2b_t const *>(ptr_B),
1606
+ capacity);
1607
+
1608
+ case library::NumericTypeID::kU4:
1609
+ return reference::device::BlockCompareEqual<uint4b_t>(
1610
+ reinterpret_cast<uint4b_t const *>(ptr_A),
1611
+ reinterpret_cast<uint4b_t const *>(ptr_B),
1612
+ capacity);
1613
+
1614
+ case library::NumericTypeID::kU8:
1615
+ return reference::device::BlockCompareEqual<uint8_t>(
1616
+ reinterpret_cast<uint8_t const *>(ptr_A),
1617
+ reinterpret_cast<uint8_t const *>(ptr_B),
1618
+ capacity);
1619
+
1620
+ case library::NumericTypeID::kU16:
1621
+ return reference::device::BlockCompareEqual<uint16_t>(
1622
+ reinterpret_cast<uint16_t const *>(ptr_A),
1623
+ reinterpret_cast<uint16_t const *>(ptr_B),
1624
+ capacity);
1625
+
1626
+ case library::NumericTypeID::kU32:
1627
+ return reference::device::BlockCompareEqual<uint32_t>(
1628
+ reinterpret_cast<uint32_t const *>(ptr_A),
1629
+ reinterpret_cast<uint32_t const *>(ptr_B),
1630
+ capacity);
1631
+
1632
+ case library::NumericTypeID::kU64:
1633
+ return reference::device::BlockCompareEqual<uint64_t>(
1634
+ reinterpret_cast<uint64_t const *>(ptr_A),
1635
+ reinterpret_cast<uint64_t const *>(ptr_B),
1636
+ capacity);
1637
+
1638
+ default:
1639
+ throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type));
1640
+ }
1641
+ }
1642
+
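Each case above simply dispatches to the device-side comparator for the concrete element type; the contract is plain element-wise equality over `capacity` elements. A host-side sketch of the same check, for illustration only:

#include <cstddef>

// True only if every one of the `capacity` elements compares exactly equal.
template <typename T>
bool block_equal_sketch(T const *a, T const *b, size_t capacity) {
  for (size_t i = 0; i < capacity; ++i) {
    if (!(a[i] == b[i])) {
      return false;
    }
  }
  return true;
}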
1643
+ /// Returns true if two blocks have approximately the same value
1644
+ bool DeviceAllocation::block_compare_relatively_equal(
1645
+ library::NumericTypeID numeric_type,
1646
+ void const *ptr_A,
1647
+ void const *ptr_B,
1648
+ size_t capacity,
1649
+ double epsilon,
1650
+ double nonzero_floor) {
1651
+
1652
+ switch (numeric_type) {
1653
+ case library::NumericTypeID::kFE4M3:
1654
+ return reference::device::BlockCompareRelativelyEqual<float_e4m3_t>(
1655
+ reinterpret_cast<float_e4m3_t const *>(ptr_A),
1656
+ reinterpret_cast<float_e4m3_t const *>(ptr_B),
1657
+ capacity,
1658
+ static_cast<float_e4m3_t>(epsilon),
1659
+ static_cast<float_e4m3_t>(nonzero_floor));
1660
+
1661
+ case library::NumericTypeID::kFE5M2:
1662
+ return reference::device::BlockCompareRelativelyEqual<float_e5m2_t>(
1663
+ reinterpret_cast<float_e5m2_t const *>(ptr_A),
1664
+ reinterpret_cast<float_e5m2_t const *>(ptr_B),
1665
+ capacity,
1666
+ static_cast<float_e5m2_t>(epsilon),
1667
+ static_cast<float_e5m2_t>(nonzero_floor));
1668
+ case library::NumericTypeID::kF16:
1669
+ return reference::device::BlockCompareRelativelyEqual<half_t>(
1670
+ reinterpret_cast<half_t const *>(ptr_A),
1671
+ reinterpret_cast<half_t const *>(ptr_B),
1672
+ capacity,
1673
+ static_cast<half_t>(epsilon),
1674
+ static_cast<half_t>(nonzero_floor));
1675
+
1676
+ case library::NumericTypeID::kBF16:
1677
+ return reference::device::BlockCompareRelativelyEqual<bfloat16_t>(
1678
+ reinterpret_cast<bfloat16_t const *>(ptr_A),
1679
+ reinterpret_cast<bfloat16_t const *>(ptr_B),
1680
+ capacity,
1681
+ static_cast<bfloat16_t>(epsilon),
1682
+ static_cast<bfloat16_t>(nonzero_floor));
1683
+
1684
+ case library::NumericTypeID::kTF32:
1685
+ return reference::device::BlockCompareRelativelyEqual<tfloat32_t>(
1686
+ reinterpret_cast<tfloat32_t const *>(ptr_A),
1687
+ reinterpret_cast<tfloat32_t const *>(ptr_B),
1688
+ capacity,
1689
+ static_cast<tfloat32_t>(epsilon),
1690
+ static_cast<tfloat32_t>(nonzero_floor));
1691
+
1692
+ case library::NumericTypeID::kF32:
1693
+ return reference::device::BlockCompareRelativelyEqual<float>(
1694
+ reinterpret_cast<float const *>(ptr_A),
1695
+ reinterpret_cast<float const *>(ptr_B),
1696
+ capacity,
1697
+ static_cast<float>(epsilon),
1698
+ static_cast<float>(nonzero_floor));
1699
+
1700
+ case library::NumericTypeID::kF64:
1701
+ return reference::device::BlockCompareRelativelyEqual<double>(
1702
+ reinterpret_cast<double const *>(ptr_A),
1703
+ reinterpret_cast<double const *>(ptr_B),
1704
+ capacity,
1705
+ static_cast<double>(epsilon),
1706
+ static_cast<double>(nonzero_floor));
1707
+
1708
+ case library::NumericTypeID::kS2:
1709
+ return reference::device::BlockCompareRelativelyEqual<int2b_t>(
1710
+ reinterpret_cast<int2b_t const *>(ptr_A),
1711
+ reinterpret_cast<int2b_t const *>(ptr_B),
1712
+ capacity,
1713
+ static_cast<int2b_t>(epsilon),
1714
+ static_cast<int2b_t>(nonzero_floor));
1715
+
1716
+ case library::NumericTypeID::kS4:
1717
+ return reference::device::BlockCompareRelativelyEqual<int4b_t>(
1718
+ reinterpret_cast<int4b_t const *>(ptr_A),
1719
+ reinterpret_cast<int4b_t const *>(ptr_B),
1720
+ capacity,
1721
+ static_cast<int4b_t>(epsilon),
1722
+ static_cast<int4b_t>(nonzero_floor));
1723
+
1724
+ case library::NumericTypeID::kS8:
1725
+ return reference::device::BlockCompareRelativelyEqual<int8_t>(
1726
+ reinterpret_cast<int8_t const *>(ptr_A),
1727
+ reinterpret_cast<int8_t const *>(ptr_B),
1728
+ capacity,
1729
+ static_cast<int8_t>(epsilon),
1730
+ static_cast<int8_t>(nonzero_floor));
1731
+
1732
+ case library::NumericTypeID::kS16:
1733
+ return reference::device::BlockCompareRelativelyEqual<int16_t>(
1734
+ reinterpret_cast<int16_t const *>(ptr_A),
1735
+ reinterpret_cast<int16_t const *>(ptr_B),
1736
+ capacity,
1737
+ static_cast<int16_t>(epsilon),
1738
+ static_cast<int16_t>(nonzero_floor));
1739
+
1740
+ case library::NumericTypeID::kS32:
1741
+ return reference::device::BlockCompareRelativelyEqual<int32_t>(
1742
+ reinterpret_cast<int32_t const *>(ptr_A),
1743
+ reinterpret_cast<int32_t const *>(ptr_B),
1744
+ capacity,
1745
+ static_cast<int32_t>(epsilon),
1746
+ static_cast<int32_t>(nonzero_floor));
1747
+
1748
+ case library::NumericTypeID::kS64:
1749
+ return reference::device::BlockCompareRelativelyEqual<int64_t>(
1750
+ reinterpret_cast<int64_t const *>(ptr_A),
1751
+ reinterpret_cast<int64_t const *>(ptr_B),
1752
+ capacity,
1753
+ static_cast<int64_t>(epsilon),
1754
+ static_cast<int64_t>(nonzero_floor));
1755
+
1756
+ case library::NumericTypeID::kB1:
1757
+ return reference::device::BlockCompareRelativelyEqual<uint1b_t>(
1758
+ reinterpret_cast<uint1b_t const *>(ptr_A),
1759
+ reinterpret_cast<uint1b_t const *>(ptr_B),
1760
+ capacity,
1761
+ static_cast<uint1b_t>(epsilon),
1762
+ static_cast<uint1b_t>(nonzero_floor));
1763
+
1764
+ case library::NumericTypeID::kU2:
1765
+ return reference::device::BlockCompareRelativelyEqual<uint2b_t>(
1766
+ reinterpret_cast<uint2b_t const *>(ptr_A),
1767
+ reinterpret_cast<uint2b_t const *>(ptr_B),
1768
+ capacity,
1769
+ static_cast<uint2b_t>(epsilon),
1770
+ static_cast<uint2b_t>(nonzero_floor));
1771
+
1772
+ case library::NumericTypeID::kU4:
1773
+ return reference::device::BlockCompareRelativelyEqual<uint4b_t>(
1774
+ reinterpret_cast<uint4b_t const *>(ptr_A),
1775
+ reinterpret_cast<uint4b_t const *>(ptr_B),
1776
+ capacity,
1777
+ static_cast<uint4b_t>(epsilon),
1778
+ static_cast<uint4b_t>(nonzero_floor));
1779
+
1780
+ case library::NumericTypeID::kU8:
1781
+ return reference::device::BlockCompareRelativelyEqual<uint8_t>(
1782
+ reinterpret_cast<uint8_t const *>(ptr_A),
1783
+ reinterpret_cast<uint8_t const *>(ptr_B),
1784
+ capacity,
1785
+ static_cast<uint8_t>(epsilon),
1786
+ static_cast<uint8_t>(nonzero_floor));
1787
+
1788
+ case library::NumericTypeID::kU16:
1789
+ return reference::device::BlockCompareRelativelyEqual<uint16_t>(
1790
+ reinterpret_cast<uint16_t const *>(ptr_A),
1791
+ reinterpret_cast<uint16_t const *>(ptr_B),
1792
+ capacity,
1793
+ static_cast<uint16_t>(epsilon),
1794
+ static_cast<uint16_t>(nonzero_floor));
1795
+
1796
+ case library::NumericTypeID::kU32:
1797
+ return reference::device::BlockCompareRelativelyEqual<uint32_t>(
1798
+ reinterpret_cast<uint32_t const *>(ptr_A),
1799
+ reinterpret_cast<uint32_t const *>(ptr_B),
1800
+ capacity,
1801
+ static_cast<uint32_t>(epsilon),
1802
+ static_cast<uint32_t>(nonzero_floor));
1803
+
1804
+ case library::NumericTypeID::kU64:
1805
+ return reference::device::BlockCompareRelativelyEqual<uint64_t>(
1806
+ reinterpret_cast<uint64_t const *>(ptr_A),
1807
+ reinterpret_cast<uint64_t const *>(ptr_B),
1808
+ capacity,
1809
+ static_cast<uint64_t>(epsilon),
1810
+ static_cast<uint64_t>(nonzero_floor));
1811
+
1812
+ // No relatively equal comparison for complex numbers.
1813
+ //
1814
+ // As a simplification, we can require bitwise equality. This avoids false positives.
1815
+ // (i.e. "pass" really means passing. "Fail" may not actually mean failure given appropriate epsilon.)
1816
+ //
1817
+ case library::NumericTypeID::kCF16:
1818
+ return reference::device::BlockCompareEqual<cutlass::complex<half_t> >(
1819
+ reinterpret_cast<complex<half_t> const *>(ptr_A),
1820
+ reinterpret_cast<complex<half_t> const *>(ptr_B),
1821
+ capacity);
1822
+
1823
+ case library::NumericTypeID::kCF32:
1824
+ return reference::device::BlockCompareEqual<cutlass::complex<float> >(
1825
+ reinterpret_cast<complex<float> const *>(ptr_A),
1826
+ reinterpret_cast<complex<float> const *>(ptr_B),
1827
+ capacity);
1828
+
1829
+ case library::NumericTypeID::kCF64:
1830
+ return reference::device::BlockCompareEqual<cutlass::complex<double> >(
1831
+ reinterpret_cast<complex<double> const *>(ptr_A),
1832
+ reinterpret_cast<complex<double> const *>(ptr_B),
1833
+ capacity);
1834
+
1835
+ default:
1836
+ {
1837
+ throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(numeric_type));
1838
+ }
1839
+ }
1840
+ }
1841
+
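The relative comparison tolerates rounding differences: two values pass if their absolute difference is within epsilon of their magnitude, with nonzero_floor guarding the near-zero case. The exact criterion used by the reference kernels is not shown here, so the sketch below is one common formulation, offered as an assumption:

#include <algorithm>
#include <cmath>

// One plausible relative-equality test: |a - b| <= epsilon * max(|a|, |b|, nonzero_floor).
bool relatively_equal_sketch(double a, double b, double epsilon, double nonzero_floor) {
  double diff = std::fabs(a - b);
  double scale = std::max(std::max(std::fabs(a), std::fabs(b)), nonzero_floor);
  return diff <= epsilon * scale;
}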
1842
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1843
+
1844
+ /// Permits copying dynamic vectors into static-length vectors
1845
+ template <typename TensorCoord, int Rank>
1846
+ struct vector_to_coord {
1847
+
1848
+ vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
1849
+
1850
+ coord[Rank - 1] = vec.at(Rank - 1);
1851
+
1852
+ if (Rank > 1) {
1853
+ vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
1854
+ }
1855
+ }
1856
+
1857
+ vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
1858
+
1859
+ coord[Rank - 1] = (int)vec.at(Rank - 1);
1860
+
1861
+ if (Rank > 1) {
1862
+ vector_to_coord<TensorCoord, Rank - 1>(coord, vec);
1863
+ }
1864
+ }
1865
+ };
1866
+
1867
+ /// Permits copying dynamic vectors into static-length vectors
1868
+ template <typename TensorCoord>
1869
+ struct vector_to_coord<TensorCoord, 1> {
1870
+
1871
+ vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
1872
+
1873
+ coord[0] = vec.at(0);
1874
+ }
1875
+
1876
+ vector_to_coord(TensorCoord &coord, std::vector<int64_t> const &vec) {
1877
+
1878
+ coord[0] = (int)vec.at(0);
1879
+ }
1880
+ };
1881
+
1882
+ /// Permits copying dynamic vectors into static-length vectors
1883
+ template <typename TensorCoord>
1884
+ struct vector_to_coord<TensorCoord, 0> {
1885
+
1886
+ vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) {
1887
+
1888
+ }
1889
+ };
1890
+
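The recursion unrolls into one assignment per rank, filling the highest index first. A short usage sketch in the style of the calls later in this file (the include path, Coord type, and rank are illustrative assumptions):

#include <vector>
#include "cutlass/coord.h"   // header path assumed

// Copies a dynamic rank-3 extent into a static Coord<3>;
// unrolls to extent[2] = 7; extent[1] = 7; extent[0] = 4;
inline void vector_to_coord_usage_sketch() {
  std::vector<int> extent_vec = {4, 7, 7};
  cutlass::Coord<3> extent;
  vector_to_coord<cutlass::Coord<3>, 3>(extent, extent_vec);
}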
1891
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1892
+
1893
+ template <typename Element, typename Layout>
1894
+ static void write_tensor_csv_static_tensor_view(
1895
+ std::ostream &out,
1896
+ DeviceAllocation &allocation) {
1897
+
1898
+ Coord<Layout::kRank> extent;
1899
+ Coord<Layout::kStrideRank, typename Layout::Stride::Index> stride;
1900
+
1901
+ if (allocation.extent().size() != Layout::kRank) {
1902
+ throw std::runtime_error("Allocation extent has invalid rank");
1903
+ }
1904
+
1905
+ if (allocation.stride().size() != Layout::kStrideRank) {
1906
+ throw std::runtime_error("Allocation stride has invalid rank");
1907
+ }
1908
+
1909
+ vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
1910
+ vector_to_coord<Coord<Layout::kStrideRank, typename Layout::Stride::Index>,
1911
+ Layout::kStrideRank>(stride, allocation.stride());
1912
+
1913
+ Layout layout(stride);
1914
+ HostTensor<Element, Layout> host_tensor(extent, layout, false);
1915
+
1916
+ if (host_tensor.capacity() != allocation.batch_stride()) {
1917
+ throw std::runtime_error("Unexpected capacity to equal.");
1918
+ }
1919
+
1920
+ host_tensor.copy_in_device_to_host(
1921
+ static_cast<Element const *>(allocation.data()),
1922
+ allocation.batch_stride());
1923
+
1924
+ TensorViewWrite(out, host_tensor.host_view());
1925
+
1926
+ out << "\n\n";
1927
+ }
1928
+
1929
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1930
+
1931
+ template <typename T>
1932
+ static void write_tensor_csv_static_type(
1933
+ std::ostream &out,
1934
+ DeviceAllocation &allocation) {
1935
+
1936
+ switch (allocation.layout()) {
1937
+ case library::LayoutTypeID::kRowMajor:
1938
+ write_tensor_csv_static_tensor_view<T, layout::RowMajor>(out, allocation);
1939
+ break;
1940
+ case library::LayoutTypeID::kColumnMajor:
1941
+ write_tensor_csv_static_tensor_view<T, layout::ColumnMajor>(out, allocation);
1942
+ break;
1943
+ case library::LayoutTypeID::kRowMajorInterleavedK2:
1944
+ write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<2>>(out, allocation);
1945
+ break;
1946
+ case library::LayoutTypeID::kColumnMajorInterleavedK2:
1947
+ write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<2>>(out, allocation);
1948
+ break;
1949
+ case library::LayoutTypeID::kRowMajorInterleavedK4:
1950
+ write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<4>>(out, allocation);
1951
+ break;
1952
+ case library::LayoutTypeID::kColumnMajorInterleavedK4:
1953
+ write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<4>>(out, allocation);
1954
+ break;
1955
+ case library::LayoutTypeID::kRowMajorInterleavedK16:
1956
+ write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<16>>(out, allocation);
1957
+ break;
1958
+ case library::LayoutTypeID::kColumnMajorInterleavedK16:
1959
+ write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<16>>(out, allocation);
1960
+ break;
1961
+ case library::LayoutTypeID::kRowMajorInterleavedK32:
1962
+ write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<32>>(out, allocation);
1963
+ break;
1964
+ case library::LayoutTypeID::kColumnMajorInterleavedK32:
1965
+ write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<32>>(out, allocation);
1966
+ break;
1967
+ case library::LayoutTypeID::kRowMajorInterleavedK64:
1968
+ write_tensor_csv_static_tensor_view<T, layout::RowMajorInterleaved<64>>(out, allocation);
1969
+ break;
1970
+ case library::LayoutTypeID::kColumnMajorInterleavedK64:
1971
+ write_tensor_csv_static_tensor_view<T, layout::ColumnMajorInterleaved<64>>(out, allocation);
1972
+ break;
1973
+ case library::LayoutTypeID::kTensorNHWC:
1974
+ write_tensor_csv_static_tensor_view<T, layout::TensorNHWC>(out, allocation);
1975
+ break;
1976
+ case library::LayoutTypeID::kTensorNDHWC:
1977
+ write_tensor_csv_static_tensor_view<T, layout::TensorNDHWC>(out, allocation);
1978
+ break;
1979
+ case library::LayoutTypeID::kTensorNC32HW32:
1980
+ write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<32>>(out, allocation);
1981
+ break;
1982
+ case library::LayoutTypeID::kTensorNC64HW64:
1983
+ write_tensor_csv_static_tensor_view<T, layout::TensorNCxHWx<64>>(out, allocation);
1984
+ break;
1985
+ case library::LayoutTypeID::kTensorC32RSK32:
1986
+ write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<32>>(out, allocation);
1987
+ break;
1988
+ case library::LayoutTypeID::kTensorC64RSK64:
1989
+ write_tensor_csv_static_tensor_view<T, layout::TensorCxRSKx<64>>(out, allocation);
1990
+ break;
1991
+ default:
1992
+ throw std::runtime_error("Unhandled layout");
1993
+ }
1994
+ }
1995
+
1996
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1997
+
1998
+ /// Writes a tensor to csv
1999
+ void DeviceAllocation::write_tensor_csv(
2000
+ std::ostream &out) {
2001
+
2002
+ switch (this->type()) {
2003
+ case library::NumericTypeID::kFE4M3:
2004
+ write_tensor_csv_static_type<float_e4m3_t>(out, *this);
2005
+ break;
2006
+
2007
+ case library::NumericTypeID::kFE5M2:
2008
+ write_tensor_csv_static_type<float_e5m2_t>(out, *this);
2009
+ break;
2010
+ case library::NumericTypeID::kF16:
2011
+ write_tensor_csv_static_type<half_t>(out, *this);
2012
+ break;
2013
+
2014
+ case library::NumericTypeID::kBF16:
2015
+ write_tensor_csv_static_type<bfloat16_t>(out, *this);
2016
+ break;
2017
+
2018
+ case library::NumericTypeID::kTF32:
2019
+ write_tensor_csv_static_type<tfloat32_t>(out, *this);
2020
+ break;
2021
+
2022
+ case library::NumericTypeID::kF32:
2023
+ write_tensor_csv_static_type<float>(out, *this);
2024
+ break;
2025
+
2026
+ case library::NumericTypeID::kF64:
2027
+ write_tensor_csv_static_type<double>(out, *this);
2028
+ break;
2029
+
2030
+ case library::NumericTypeID::kS2:
2031
+ write_tensor_csv_static_type<int2b_t>(out, *this);
2032
+ break;
2033
+
2034
+ case library::NumericTypeID::kS4:
2035
+ write_tensor_csv_static_type<int4b_t>(out, *this);
2036
+ break;
2037
+
2038
+ case library::NumericTypeID::kS8:
2039
+ write_tensor_csv_static_type<int8_t>(out, *this);
2040
+ break;
2041
+
2042
+ case library::NumericTypeID::kS16:
2043
+ write_tensor_csv_static_type<int16_t>(out, *this);
2044
+ break;
2045
+
2046
+ case library::NumericTypeID::kS32:
2047
+ write_tensor_csv_static_type<int32_t>(out, *this);
2048
+ break;
2049
+
2050
+ case library::NumericTypeID::kS64:
2051
+ write_tensor_csv_static_type<int64_t>(out, *this);
2052
+ break;
2053
+
2054
+ case library::NumericTypeID::kB1:
2055
+ write_tensor_csv_static_type<uint1b_t>(out, *this);
2056
+ break;
2057
+
2058
+ case library::NumericTypeID::kU2:
2059
+ write_tensor_csv_static_type<uint2b_t>(out, *this);
2060
+ break;
2061
+
2062
+ case library::NumericTypeID::kU4:
2063
+ write_tensor_csv_static_type<uint4b_t>(out, *this);
2064
+ break;
2065
+
2066
+ case library::NumericTypeID::kU8:
2067
+ write_tensor_csv_static_type<uint8_t>(out, *this);
2068
+ break;
2069
+
2070
+ case library::NumericTypeID::kU16:
2071
+ write_tensor_csv_static_type<uint16_t>(out, *this);
2072
+ break;
2073
+
2074
+ case library::NumericTypeID::kU32:
2075
+ write_tensor_csv_static_type<uint32_t>(out, *this);
2076
+ break;
2077
+
2078
+ case library::NumericTypeID::kU64:
2079
+ write_tensor_csv_static_type<uint64_t>(out, *this);
2080
+ break;
2081
+
2082
+ case library::NumericTypeID::kCF16:
2083
+ write_tensor_csv_static_type<cutlass::complex<half_t> >(out, *this);
2084
+ break;
2085
+
2086
+ case library::NumericTypeID::kCF32:
2087
+ write_tensor_csv_static_type<cutlass::complex<float> >(out, *this);
2088
+ break;
2089
+
2090
+ case library::NumericTypeID::kCF64:
2091
+ write_tensor_csv_static_type<cutlass::complex<double> >(out, *this);
2092
+ break;
2093
+
2094
+ case library::NumericTypeID::kVoid:
2095
+ // Do not dump anything as it is an empty tensor.
2096
+ break;
2097
+
2098
+ default:
2099
+ throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(this->type()) ) ;
2100
+ }
2101
+ }
2102
+
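A hedged usage sketch: dumping an already-initialized allocation to a CSV file through the routine above (the header path and namespace are assumptions):

#include <fstream>
#include "cutlass/profiler/device_allocation.h"   // header path assumed

// Writes the tensor held by `allocation` to tensor.csv using its recorded type and layout.
inline void dump_tensor_csv_sketch(cutlass::profiler::DeviceAllocation &allocation) {
  std::ofstream out("tensor.csv");
  allocation.write_tensor_csv(out);
}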
2103
+ template <typename Element, typename Layout>
2104
+ static void tensor_fill_tensor_view(DeviceAllocation &allocation, Element val = Element()) {
2105
+ Coord<Layout::kRank> extent;
2106
+ Coord<Layout::kStrideRank, typename Layout::LongIndex> stride;
2107
+
2108
+ if (allocation.extent().size() != Layout::kRank) {
2109
+ throw std::runtime_error("Allocation extent has invalid rank");
2110
+ }
2111
+
2112
+ if (allocation.stride().size() != Layout::kStrideRank) {
2113
+ throw std::runtime_error("Allocation stride has invalid rank");
2114
+ }
2115
+
2116
+ vector_to_coord<Coord<Layout::kRank>, Layout::kRank>(extent, allocation.extent());
2117
+ vector_to_coord<Coord<Layout::kStrideRank, typename Layout::LongIndex>,
2118
+ Layout::kStrideRank>(stride, allocation.stride());
2119
+
2120
+ TensorView<Element, Layout> view(
2121
+ static_cast<Element *>(allocation.data()),
2122
+ Layout(stride),
2123
+ extent
2124
+ );
2125
+
2126
+
2127
+ cutlass::reference::device::TensorFill<Element, Layout>(
2128
+ view,
2129
+ val
2130
+ );
2131
+ }
2132
+
2133
+ template <typename Element>
2134
+ static void tensor_fill(DeviceAllocation &allocation, Element val = Element()) {
2135
+ switch (allocation.layout()) {
2136
+ case library::LayoutTypeID::kRowMajor:
2137
+ tensor_fill_tensor_view<Element, layout::RowMajor>(allocation, val);
2138
+ break;
2139
+ case library::LayoutTypeID::kColumnMajor:
2140
+ tensor_fill_tensor_view<Element, layout::ColumnMajor>(allocation, val);
2141
+ break;
2142
+ case library::LayoutTypeID::kTensorNHWC:
2143
+ tensor_fill_tensor_view<Element, layout::TensorNHWC>(allocation, val);
2144
+ break;
2145
+ case library::LayoutTypeID::kTensorNDHWC:
2146
+ tensor_fill_tensor_view<Element, layout::TensorNDHWC>(allocation, val);
2147
+ break;
2148
+ case library::LayoutTypeID::kTensorNC32HW32:
2149
+ tensor_fill_tensor_view<Element, layout::TensorNCxHWx<32>>(allocation, val);
2150
+ break;
2151
+ case library::LayoutTypeID::kTensorNC64HW64:
2152
+ tensor_fill_tensor_view<Element, layout::TensorNCxHWx<64>>(allocation, val);
2153
+ break;
2154
+ case library::LayoutTypeID::kTensorC32RSK32:
2155
+ tensor_fill_tensor_view<Element, layout::TensorCxRSKx<32>>(allocation, val);
2156
+ break;
2157
+ case library::LayoutTypeID::kTensorC64RSK64:
2158
+ tensor_fill_tensor_view<Element, layout::TensorCxRSKx<64>>(allocation, val);
2159
+ break;
2160
+ default:
2161
+ throw std::runtime_error("Unsupported layout");
2162
+ break;
2163
+ }
2164
+ }
2165
+
2166
+ /// Fills a tensor uniformly with a value (most frequently used to clear the tensor)
2167
+ void DeviceAllocation::fill_device(double val = 0.0) {
2168
+
2169
+ switch (this->type()) {
2170
+ case library::NumericTypeID::kFE4M3:
2171
+ tensor_fill<float_e4m3_t>(*this, static_cast<float_e4m3_t>(val));
2172
+ break;
2173
+
2174
+ case library::NumericTypeID::kFE5M2:
2175
+ tensor_fill<float_e5m2_t>(*this, static_cast<float_e5m2_t>(val));
2176
+ break;
2177
+ case library::NumericTypeID::kF16:
2178
+ tensor_fill<half_t>(*this, static_cast<half_t>(val));
2179
+ break;
2180
+
2181
+ case library::NumericTypeID::kBF16:
2182
+ tensor_fill<bfloat16_t>(*this, static_cast<bfloat16_t>(val));
2183
+ break;
2184
+
2185
+ case library::NumericTypeID::kTF32:
2186
+ tensor_fill<tfloat32_t>(*this, static_cast<tfloat32_t>(val));
2187
+ break;
2188
+
2189
+ case library::NumericTypeID::kF32:
2190
+ tensor_fill<float>(*this, static_cast<float>(val));
2191
+ break;
2192
+
2193
+ case library::NumericTypeID::kF64:
2194
+ tensor_fill<double>(*this, static_cast<double>(val));
2195
+ break;
2196
+
2197
+ case library::NumericTypeID::kS2:
2198
+ tensor_fill<int2b_t>(*this, static_cast<int2b_t>(val));
2199
+ break;
2200
+
2201
+ case library::NumericTypeID::kS4:
2202
+ tensor_fill<int4b_t>(*this, static_cast<int4b_t>(val));
2203
+ break;
2204
+
2205
+ case library::NumericTypeID::kS8:
2206
+ tensor_fill<int8_t>(*this, static_cast<int8_t>(val));
2207
+ break;
2208
+
2209
+ case library::NumericTypeID::kS16:
2210
+ tensor_fill<int16_t>(*this, static_cast<int16_t>(val));
2211
+ break;
2212
+
2213
+ case library::NumericTypeID::kS32:
2214
+ tensor_fill<int32_t>(*this, static_cast<int32_t>(val));
2215
+ break;
2216
+
2217
+ case library::NumericTypeID::kS64:
2218
+ tensor_fill<int64_t>(*this, static_cast<int64_t>(val));
2219
+ break;
2220
+
2221
+ case library::NumericTypeID::kB1:
2222
+ tensor_fill<uint1b_t>(*this, static_cast<uint1b_t>(val));
2223
+ break;
2224
+
2225
+ case library::NumericTypeID::kU2:
2226
+ tensor_fill<uint2b_t>(*this, static_cast<uint2b_t>(val));
2227
+ break;
2228
+
2229
+ case library::NumericTypeID::kU4:
2230
+ tensor_fill<uint4b_t>(*this, static_cast<uint4b_t>(val));
2231
+ break;
2232
+
2233
+ case library::NumericTypeID::kU8:
2234
+ tensor_fill<uint8_t>(*this, static_cast<uint8_t>(val));
2235
+ break;
2236
+
2237
+ case library::NumericTypeID::kU16:
2238
+ tensor_fill<uint16_t>(*this, static_cast<uint16_t>(val));
2239
+ break;
2240
+
2241
+ case library::NumericTypeID::kU32:
2242
+ tensor_fill<uint32_t>(*this, static_cast<uint32_t>(val));
2243
+ break;
2244
+
2245
+ case library::NumericTypeID::kU64:
2246
+ tensor_fill<uint64_t>(*this, static_cast<uint64_t>(val));
2247
+ break;
2248
+
2249
+ case library::NumericTypeID::kCF16:
2250
+ tensor_fill<cutlass::complex<half_t> >(*this, from_real<half_t>(val));
2251
+ break;
2252
+
2253
+ case library::NumericTypeID::kCF32:
2254
+ tensor_fill<cutlass::complex<float> >(*this, from_real<float>(val));
2255
+ break;
2256
+
2257
+ case library::NumericTypeID::kCF64:
2258
+ tensor_fill<cutlass::complex<double> >(*this, from_real<double>(val));
2259
+ break;
2260
+
2261
+ default:
2262
+ throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(this->type()));
2263
+ }
2264
+ }
2265
+
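A hedged usage sketch of the device-side fill above, most commonly used to zero an output tensor before a reference run (the header path and namespace are assumptions):

#include "cutlass/profiler/device_allocation.h"   // header path assumed

// Clears two output allocations on the device before accumulating results into them.
inline void clear_outputs_sketch(cutlass::profiler::DeviceAllocation &tensor_D,
                                 cutlass::profiler::DeviceAllocation &tensor_Ref) {
  tensor_D.fill_device(0.0);
  tensor_Ref.fill_device(0.0);
}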
2266
+ /// Fills a tensor uniformly with a value (most frequently used to clear the tensor)
2267
+ void DeviceAllocation::fill_host(double val = 0.0) {
2268
+
2269
+ std::vector<uint8_t> host_data(bytes());
2270
+
2271
+ switch (this->type()) {
2272
+ case library::NumericTypeID::kFE4M3:
2273
+ cutlass::reference::host::BlockFill<float_e4m3_t>(
2274
+ reinterpret_cast<float_e4m3_t *>(host_data.data()),
2275
+ capacity_,
2276
+ static_cast<float_e4m3_t>(val)
2277
+ );
2278
+ break;
2279
+
2280
+ case library::NumericTypeID::kFE5M2:
2281
+ cutlass::reference::host::BlockFill<float_e5m2_t>(
2282
+ reinterpret_cast<float_e5m2_t *>(host_data.data()),
2283
+ capacity_,
2284
+ static_cast<float_e5m2_t>(val)
2285
+ );
2286
+ break;
2287
+
2288
+ case library::NumericTypeID::kF16:
2289
+ cutlass::reference::host::BlockFill<half_t>(
2290
+ reinterpret_cast<half_t *>(host_data.data()),
2291
+ capacity_,
2292
+ static_cast<half_t>(val)
2293
+ );
2294
+ break;
2295
+
2296
+ case library::NumericTypeID::kBF16:
2297
+ cutlass::reference::host::BlockFill<bfloat16_t>(
2298
+ reinterpret_cast<bfloat16_t *>(host_data.data()),
2299
+ capacity_,
2300
+ static_cast<bfloat16_t>(val)
2301
+ );
2302
+ break;
2303
+
2304
+ case library::NumericTypeID::kTF32:
2305
+ cutlass::reference::host::BlockFill<tfloat32_t>(
2306
+ reinterpret_cast<tfloat32_t *>(host_data.data()),
2307
+ capacity_,
2308
+ static_cast<tfloat32_t>(val)
2309
+ );
2310
+ break;
2311
+
2312
+ case library::NumericTypeID::kF32:
2313
+ cutlass::reference::host::BlockFill<float>(
2314
+ reinterpret_cast<float *>(host_data.data()),
2315
+ capacity_,
2316
+ static_cast<float>(val)
2317
+ );
2318
+ break;
2319
+
2320
+ case library::NumericTypeID::kF64:
2321
+ cutlass::reference::host::BlockFill<double>(
2322
+ reinterpret_cast<double *>(host_data.data()),
2323
+ capacity_,
2324
+ static_cast<double>(val)
2325
+ );
2326
+ break;
2327
+
2328
+ case library::NumericTypeID::kS2:
2329
+ cutlass::reference::host::BlockFill<int2b_t>(
2330
+ reinterpret_cast<int2b_t *>(host_data.data()),
2331
+ capacity_,
2332
+ static_cast<int2b_t>(val)
2333
+ );
2334
+ break;
2335
+
2336
+ case library::NumericTypeID::kS4:
2337
+ cutlass::reference::host::BlockFill<int4b_t>(
2338
+ reinterpret_cast<int4b_t *>(host_data.data()),
2339
+ capacity_,
2340
+ static_cast<int4b_t>(val)
2341
+ );
2342
+ break;
2343
+
2344
+ case library::NumericTypeID::kS8:
2345
+ cutlass::reference::host::BlockFill<int8_t>(
2346
+ reinterpret_cast<int8_t *>(host_data.data()),
2347
+ capacity_,
2348
+ static_cast<int8_t>(val)
2349
+ );
2350
+ break;
2351
+
2352
+ case library::NumericTypeID::kS16:
2353
+ cutlass::reference::host::BlockFill<int16_t>(
2354
+ reinterpret_cast<int16_t *>(host_data.data()),
2355
+ capacity_,
2356
+ static_cast<int16_t>(val)
2357
+ );
2358
+ break;
2359
+
2360
+ case library::NumericTypeID::kS32:
2361
+ cutlass::reference::host::BlockFill<int32_t>(
2362
+ reinterpret_cast<int32_t *>(host_data.data()),
2363
+ capacity_,
2364
+ static_cast<int32_t>(val)
2365
+ );
2366
+ break;
2367
+
2368
+ case library::NumericTypeID::kS64:
2369
+ cutlass::reference::host::BlockFill<int64_t>(
2370
+ reinterpret_cast<int64_t *>(host_data.data()),
2371
+ capacity_,
2372
+ static_cast<int64_t>(val)
2373
+ );
2374
+ break;
2375
+
2376
+ case library::NumericTypeID::kB1:
2377
+ cutlass::reference::host::BlockFill<uint1b_t>(
2378
+ reinterpret_cast<uint1b_t *>(host_data.data()),
2379
+ capacity_,
2380
+ static_cast<uint1b_t>(val)
2381
+ );
2382
+ break;
2383
+
2384
+ case library::NumericTypeID::kU2:
2385
+ cutlass::reference::host::BlockFill<uint2b_t>(
2386
+ reinterpret_cast<uint2b_t *>(host_data.data()),
2387
+ capacity_,
2388
+ static_cast<uint2b_t>(val)
2389
+ );
2390
+ break;
2391
+
2392
+ case library::NumericTypeID::kU4:
2393
+ cutlass::reference::host::BlockFill<uint4b_t>(
2394
+ reinterpret_cast<uint4b_t *>(host_data.data()),
2395
+ capacity_,
2396
+ static_cast<uint4b_t>(val)
2397
+ );
2398
+ break;
2399
+
2400
+ case library::NumericTypeID::kU8:
2401
+ cutlass::reference::host::BlockFill<uint8_t>(
2402
+ reinterpret_cast<uint8_t *>(host_data.data()),
2403
+ capacity_,
2404
+ static_cast<uint8_t>(val)
2405
+ );
2406
+ break;
2407
+
2408
+ case library::NumericTypeID::kU16:
2409
+ cutlass::reference::host::BlockFill<uint16_t>(
2410
+ reinterpret_cast<uint16_t *>(host_data.data()),
2411
+ capacity_,
2412
+ static_cast<uint16_t>(val)
2413
+ );
2414
+ break;
2415
+
2416
+ case library::NumericTypeID::kU32:
2417
+ cutlass::reference::host::BlockFill<uint32_t>(
2418
+ reinterpret_cast<uint32_t *>(host_data.data()),
2419
+ capacity_,
2420
+ static_cast<uint32_t>(val)
2421
+ );
2422
+ break;
2423
+
2424
+ case library::NumericTypeID::kU64:
2425
+ cutlass::reference::host::BlockFill<uint64_t>(
2426
+ reinterpret_cast<uint64_t *>(host_data.data()),
2427
+ capacity_,
2428
+ static_cast<uint64_t>(val)
2429
+ );
2430
+ break;
2431
+
2432
+ default:
2433
+ throw std::runtime_error(std::string("Unsupported numeric type: ") + to_string(this->type()));
2434
+ }
2435
+
2436
+ copy_from_host(host_data.data());
2437
+ }
2438
+
2439
+ cudaError_t DeviceAllocation::malloc(void** ptr, size_t size) {
2440
+ cudaError_t result;
2441
+ int set_device_back_to = -1;
2442
+
2443
+ /// If needed, this switches to the allocation's device, remembering the
2444
+ /// current device so that it can be restored after the cudaMalloc has been
2445
+ /// performed.
2446
+ if (device_ >= 0) {
2447
+ int current_device;
2448
+ result = cudaGetDevice(&current_device);
2449
+ if (result != cudaSuccess) {
2450
+ return result;
2451
+ }
2452
+
2453
+ if (current_device != device_) {
2454
+ set_device_back_to = current_device;
2455
+ result = cudaSetDevice(device_);
2456
+ if (result != cudaSuccess) {
2457
+ return result;
2458
+ }
2459
+ }
2460
+ }
2461
+
2462
+ // This performs the cudaMalloc
2463
+ result = cudaMalloc(ptr, size);
2464
+ if (result != cudaSuccess) {
2465
+ return result;
2466
+ }
2467
+
2468
+ /// When needed this sets the device back to what it was when the function was
2469
+ /// called.
2470
+ if (set_device_back_to != -1) {
2471
+ result = cudaSetDevice(set_device_back_to);
2472
+ if (result != cudaSuccess) {
2473
+ return result;
2474
+ }
2475
+ }
2476
+
2477
+ return cudaSuccess;
2478
+ }
2479
+
2480
+ /////////////////////////////////////////////////////////////////////////////////////////////////
2481
+
2482
+ } // namespace profiler
2483
+ } // namespace cutlass
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/device_context.cu ADDED
@@ -0,0 +1,245 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Implementation of DeviceContext, which owns and initializes the named device memory allocations used by the profiler.
33
+ */
34
+
35
+ #include "cutlass/profiler/device_context.h"
36
+
37
+ namespace cutlass {
38
+ namespace profiler {
39
+
40
+ /////////////////////////////////////////////////////////////////////////////////////////////////
41
+
42
+ /// Allocates memory of a given type, capacity (elements), and name
43
+ DeviceAllocation *DeviceContext::allocate_block(
44
+ Options const &options,
45
+ std::string const &name,
46
+ library::NumericTypeID type,
47
+ size_t capacity,
48
+ size_t device_index) {
49
+
50
+ int device = options.device.device_id(device_index);
51
+ device_memory_.emplace_back(type, capacity, device);
52
+ DeviceAllocation *allocation = &device_memory_.back();
53
+
54
+ allocations_[name] = allocation;
55
+ return allocation;
56
+ }
57
+
58
+ /// Allocates memory of a given type, capacity (elements), and name
59
+ DeviceAllocation *DeviceContext::allocate_tensor(
60
+ Options const &options,
61
+ std::string const &name,
62
+ library::NumericTypeID type,
63
+ library::LayoutTypeID layout_id,
64
+ std::vector<int> const &extent,
65
+ std::vector<int64_t> const &stride,
66
+ int batch_count,
67
+ size_t device_index) {
68
+
69
+ int device = options.device.device_id(device_index);
70
+ device_memory_.emplace_back(type, layout_id, extent, stride, batch_count,
71
+ device);
72
+ DeviceAllocation *allocation = &device_memory_.back();
73
+
74
+ allocations_[name] = allocation;
75
+ return allocation;
76
+ }
77
+
78
+ /// Allocates memory of a given type, capacity (elements), and name
79
+ DeviceAllocation *DeviceContext::allocate_and_initialize_tensor(
80
+ Options const &options,
81
+ std::string const &name,
82
+ library::NumericTypeID type,
83
+ library::LayoutTypeID layout_id,
84
+ std::vector<int> const &extent,
85
+ std::vector<int64_t> const &stride,
86
+ int batch_count,
87
+ int seed_shift,
88
+ size_t device_index) {
89
+
90
+ DeviceAllocation *allocation =
91
+ allocate_tensor(options, name, type, layout_id, extent, stride,
92
+ batch_count, device_index);
93
+
94
+ if (options.initialization.enabled) {
95
+ Distribution data_distribution = options.initialization.data_distribution;
96
+
97
+ // check if data distribution is allowed to change
98
+ if(!options.initialization.fix_data_distribution) {
99
+ // change data distribution based on bit width
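+ // Narrow types are drawn from tighter value ranges (e.g. signed 4-bit from [-2, 2],
+ // FP8 from [-1, 1]), presumably to keep accumulations in the verification path well behaved.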
100
+ switch(type) {
101
+ case library::NumericTypeID::kFE4M3:
102
+ data_distribution.set_uniform(-1, 1, 0);
103
+ break;
104
+ case library::NumericTypeID::kFE5M2:
105
+ data_distribution.set_uniform(-1, 1, 0);
106
+ break;
107
+ case library::NumericTypeID::kF16:
108
+ data_distribution.set_uniform(-3, 3, 0);
109
+ break;
110
+ case library::NumericTypeID::kB1:
111
+ data_distribution.set_uniform(0, 1, 0);
112
+ break;
113
+ case library::NumericTypeID::kS2:
114
+ data_distribution.set_uniform(-1, 1, 0);
115
+ break;
116
+ case library::NumericTypeID::kS4:
117
+ data_distribution.set_uniform(-2, 2, 0);
118
+ break;
119
+ case library::NumericTypeID::kU2:
120
+ data_distribution.set_uniform(0, 2, 0);
121
+ break;
122
+ case library::NumericTypeID::kU4:
123
+ data_distribution.set_uniform(0, 2, 0);
124
+ break;
125
+ case library::NumericTypeID::kS8:
126
+ data_distribution.set_uniform(-3, 3, 0);
127
+ break;
128
+ case library::NumericTypeID::kU8:
129
+ data_distribution.set_uniform(0, 4, 0);
130
+ break;
131
+ default: break;
132
+ }
133
+ }
134
+
135
+ // Override the probability of a non-zero entry (pnz) for the A/B/C tensors when per-tensor values are specified for Gaussian distributions
136
+ if (data_distribution.kind == Distribution::Gaussian) {
137
+ double mean = data_distribution.gaussian.mean;
138
+ double stddev = data_distribution.gaussian.stddev;
139
+ int scale = data_distribution.int_scale;
140
+
141
+ if (name == "A" && data_distribution.gaussian.pnzA != 1.0) {
142
+ data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzA);
143
+ }
144
+ else if (name == "B" && data_distribution.gaussian.pnzB != 1.0) {
145
+ data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzB);
146
+ }
147
+ else if (name == "C" && data_distribution.gaussian.pnzC != 1.0) {
148
+ data_distribution.set_gaussian(mean, stddev, scale, data_distribution.gaussian.pnzC);
149
+ }
150
+ }
151
+
152
+ if (options.initialization.provider == library::Provider::kReferenceDevice) {
153
+ if (data_distribution.kind == Distribution::Sequential) {
154
+ allocation->initialize_sequential_device(
155
+ data_distribution);
156
+ }
157
+ else {
158
+ allocation->initialize_random_device(
159
+ options.initialization.seed + seed_shift,
160
+ data_distribution);
161
+ }
162
+ }
163
+ else if (options.initialization.provider == library::Provider::kReferenceHost) {
164
+ if (data_distribution.kind == Distribution::Sequential) {
165
+ allocation->initialize_sequential_host(
166
+ data_distribution);
167
+ }
168
+ else {
169
+ allocation->initialize_random_host(
170
+ options.initialization.seed + seed_shift,
171
+ data_distribution);
172
+ }
173
+ }
174
+ }
175
+
176
+ return allocation;
177
+ }
178
+
179
+ /// Allocates memory for sparse meta data
180
+ DeviceAllocation *DeviceContext::allocate_and_initialize_sparsemeta_tensor(
181
+ Options const &options,
182
+ std::string const &name,
183
+ library::NumericTypeID type,
184
+ library::LayoutTypeID layout_id,
185
+ library::NumericTypeID type_a,
186
+ std::vector<int> const &extent,
187
+ std::vector<int64_t> const &stride,
188
+ int batch_count,
189
+ int seed_shift,
190
+ size_t device_index) {
191
+
192
+ DeviceAllocation *allocation =
193
+ allocate_tensor(options, name, type, layout_id, extent, stride,
194
+ batch_count, device_index);
195
+
196
+ if (options.initialization.enabled) {
197
+ // TF32 uses 4-bit sparse metadata; all other element types use 2-bit metadata.
198
+ int MetaSizeInBits = (cutlass::library::sizeof_bits(type_a) == 32) ? 4 : 2;
199
+
200
+ if (options.initialization.provider == library::Provider::kReferenceDevice) {
201
+ allocation->initialize_random_sparsemeta_device(
202
+ options.initialization.seed + seed_shift,
203
+ MetaSizeInBits);
204
+ }
205
+ else if (options.initialization.provider == library::Provider::kReferenceHost) {
206
+ allocation->initialize_random_sparsemeta_host(
207
+ options.initialization.seed + seed_shift,
208
+ MetaSizeInBits);
209
+ }
210
+ }
211
+
212
+ return allocation;
213
+ }
214
+ /// Clears named allocations (but does not necessarily free memory)
215
+ void DeviceContext::clear() {
216
+ allocations_.clear();
217
+ }
218
+
219
+ /// Frees all device memory allocations
220
+ void DeviceContext::free() {
221
+ allocations_.clear();
222
+ device_memory_.clear();
223
+ }
224
+
225
+ /// Gets the allocation by name
226
+ DeviceAllocation &DeviceContext::at(std::string const &name) {
227
+ return *allocations_.at(name);
228
+ }
229
+
230
+ size_t DeviceContext::size() const {
231
+ return allocations_.size();
232
+ }
233
+
234
+ DeviceContext::AllocationMap::iterator DeviceContext::begin() {
235
+ return allocations_.begin();
236
+ }
237
+
238
+ DeviceContext::AllocationMap::iterator DeviceContext::end() {
239
+ return allocations_.end();
240
+ }
241
+
242
+ /////////////////////////////////////////////////////////////////////////////////////////////////
243
+
244
+ } // namespace profiler
245
+ } // namespace cutlass
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/enumerated_types.cpp ADDED
@@ -0,0 +1,275 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Provides functions for converting the profiler's enumerated types to and from strings.
33
+ */
34
+
35
+ #include "cutlass/profiler/enumerated_types.h"
36
+
37
+ namespace cutlass {
38
+ namespace profiler {
39
+
40
+ /////////////////////////////////////////////////////////////////////////////////////////////////
41
+
42
+ static struct {
43
+ char const *text;
44
+ char const *pretty;
45
+ ExecutionMode enumerant;
46
+ }
47
+ ExecutionMode_enumerants[] = {
48
+ {"profile", "Profile", ExecutionMode::kProfile},
49
+ {"dry_run", "Dry run", ExecutionMode::kDryRun},
50
+ {"dry", "dry run", ExecutionMode::kDryRun},
51
+ {"trace", "Trace", ExecutionMode::kTrace},
52
+ {"enumerate", "Enumerate", ExecutionMode::kEnumerate}
53
+ };
54
+
55
+ /// Converts an ExecutionMode enumerant to a string
56
+ char const *to_string(ExecutionMode mode, bool pretty) {
57
+
58
+ for (auto const & possible : ExecutionMode_enumerants) {
59
+ if (mode == possible.enumerant) {
60
+ if (pretty) {
61
+ return possible.pretty;
62
+ }
63
+ else {
64
+ return possible.text;
65
+ }
66
+ }
67
+ }
68
+
69
+ return pretty ? "Invalid" : "invalid";
70
+ }
71
+
72
+ /// Parses an ExecutionMode enumerant from a string
73
+ template <>
74
+ ExecutionMode from_string<ExecutionMode>(std::string const &str) {
75
+
76
+ for (auto const & possible : ExecutionMode_enumerants) {
77
+ if ((str.compare(possible.text) == 0) ||
78
+ (str.compare(possible.pretty) == 0)) {
79
+ return possible.enumerant;
80
+ }
81
+ }
82
+
83
+ return ExecutionMode::kInvalid;
84
+ }
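+ // Illustrative round trip: from_string<ExecutionMode>("trace") yields ExecutionMode::kTrace,
+ // and to_string(ExecutionMode::kTrace, /*pretty=*/true) yields "Trace".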
85
+
86
+ /////////////////////////////////////////////////////////////////////////////////////////////////
87
+
88
+ static struct {
89
+ char const *text;
90
+ char const *pretty;
91
+ AlgorithmMode enumerant;
92
+ }
93
+ AlgorithmMode_enumerants[] = {
94
+ {"matching", "Matching", AlgorithmMode::kMatching},
95
+ {"best", "Best", AlgorithmMode::kBest},
96
+ {"default", "Default", AlgorithmMode::kDefault}
97
+ };
98
+
99
+ /// Converts an AlgorithmMode enumerant to a string
100
+ char const *to_string(AlgorithmMode mode, bool pretty) {
101
+
102
+ for (auto const & possible : AlgorithmMode_enumerants) {
103
+ if (mode == possible.enumerant) {
104
+ if (pretty) {
105
+ return possible.pretty;
106
+ }
107
+ else {
108
+ return possible.text;
109
+ }
110
+ }
111
+ }
112
+
113
+ return pretty ? "Invalid" : "invalid";
114
+ }
115
+
116
+ /// Parses an AlgorithmMode enumerant from a string
117
+ template <>
118
+ AlgorithmMode from_string<AlgorithmMode>(std::string const &str) {
119
+
120
+ for (auto const & possible : AlgorithmMode_enumerants) {
121
+ if ((str.compare(possible.text) == 0) ||
122
+ (str.compare(possible.pretty) == 0)) {
123
+ return possible.enumerant;
124
+ }
125
+ }
126
+
127
+ return AlgorithmMode::kInvalid;
128
+ }
129
+
130
+ /////////////////////////////////////////////////////////////////////////////////////////////////
131
+
132
+ static struct {
133
+ char const *text;
134
+ char const *pretty;
135
+ Disposition enumerant;
136
+ }
137
+ Disposition_enumerants[] = {
138
+ {"passed", "Passed", Disposition::kPassed},
139
+ {"failed", "Failed", Disposition::kFailed},
140
+ {"not_run", "Not run", Disposition::kNotRun},
141
+ {"not_verified", "Not verified", Disposition::kNotVerified},
142
+ {"invalid_problem", "Invalid problem", Disposition::kInvalidProblem},
143
+ {"not_supported", "Not supported", Disposition::kNotSupported},
144
+ {"incorrect", "Incorrect", Disposition::kIncorrect}
145
+ };
146
+
147
+ /// Converts a Disposition enumerant to a string
148
+ char const *to_string(Disposition disposition, bool pretty) {
149
+
150
+ for (auto const & possible : Disposition_enumerants) {
151
+ if (disposition == possible.enumerant) {
152
+ if (pretty) {
153
+ return possible.pretty;
154
+ }
155
+ else {
156
+ return possible.text;
157
+ }
158
+ }
159
+ }
160
+
161
+ return pretty ? "Invalid" : "invalid";
162
+ }
163
+
164
+ /// Parses a Disposition enumerant from a string
165
+ template <>
166
+ Disposition from_string<Disposition>(std::string const &str) {
167
+
168
+ for (auto const & possible : Disposition_enumerants) {
169
+ if ((str.compare(possible.text) == 0) ||
170
+ (str.compare(possible.pretty) == 0)) {
171
+ return possible.enumerant;
172
+ }
173
+ }
174
+
175
+ return Disposition::kInvalid;
176
+ }
177
+
178
+ /////////////////////////////////////////////////////////////////////////////////////////////////
179
+
180
+ static struct {
181
+ char const *text;
182
+ char const *pretty;
183
+ SaveWorkspace enumerant;
184
+ }
185
+ SaveWorkspace_enumerants[] = {
186
+ {"never", "Never", SaveWorkspace::kNever},
187
+ {"incorrect", "Incorrect", SaveWorkspace::kIncorrect},
188
+ {"always", "Always", SaveWorkspace::kAlways}
189
+ };
190
+
191
+ /// Converts a SaveWorkspace enumerant to a string
192
+ char const *to_string(SaveWorkspace save_option, bool pretty) {
193
+
194
+ for (auto const & possible : SaveWorkspace_enumerants) {
195
+ if (save_option == possible.enumerant) {
196
+ if (pretty) {
197
+ return possible.pretty;
198
+ }
199
+ else {
200
+ return possible.text;
201
+ }
202
+ }
203
+ }
204
+
205
+ return pretty ? "Invalid" : "invalid";
206
+ }
207
+
208
+ /// Parses a SaveWorkspace enumerant from a string
209
+ template <>
210
+ SaveWorkspace from_string<SaveWorkspace>(std::string const &str) {
211
+
212
+ for (auto const & possible : SaveWorkspace_enumerants) {
213
+ if ((str.compare(possible.text) == 0) ||
214
+ (str.compare(possible.pretty) == 0)) {
215
+ return possible.enumerant;
216
+ }
217
+ }
218
+
219
+ return SaveWorkspace::kInvalid;
220
+ }
221
+
222
+ /////////////////////////////////////////////////////////////////////////////////////////////////
223
+
224
+ static struct {
225
+ char const *text;
226
+ char const *pretty;
227
+ ArgumentTypeID enumerant;
228
+ }
229
+ ArgumentTypeID_enumerants[] = {
230
+ {"scalar", "Scalar", ArgumentTypeID::kScalar},
231
+ {"int", "Integer", ArgumentTypeID::kInteger},
232
+ {"tensor", "Tensor", ArgumentTypeID::kTensor},
233
+ {"batched_tensor", "BatchedTensor", ArgumentTypeID::kBatchedTensor},
234
+ {"struct", "Struct", ArgumentTypeID::kStructure},
235
+ {"enum", "Enumerated type", ArgumentTypeID::kEnumerated}
236
+ };
237
+
238
+ /// Converts an ArgumentTypeID enumerant to a string
239
+ char const *to_string(ArgumentTypeID type, bool pretty) {
240
+
241
+ for (auto const & possible : ArgumentTypeID_enumerants) {
242
+ if (type == possible.enumerant) {
243
+ if (pretty) {
244
+ return possible.pretty;
245
+ }
246
+ else {
247
+ return possible.text;
248
+ }
249
+ }
250
+ }
251
+
252
+ return pretty ? "Invalid" : "invalid";
253
+ }
254
+
255
+ /// Parses an ArgumentTypeID enumerant from a string
256
+ template <>
257
+ ArgumentTypeID from_string<ArgumentTypeID>(std::string const &str) {
258
+
259
+ for (auto const & possible : ArgumentTypeID_enumerants) {
260
+ if ((str.compare(possible.text) == 0) ||
261
+ (str.compare(possible.pretty) == 0)) {
262
+ return possible.enumerant;
263
+ }
264
+ }
265
+
266
+ return ArgumentTypeID::kInvalid;
267
+ }
268
+
269
+ /////////////////////////////////////////////////////////////////////////////////////////////////
270
+
271
+ } // namespace profiler
272
+ } // namespace cutlass
273
+
274
+ /////////////////////////////////////////////////////////////////////////////////////////////////
275
+
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/gemm_operation_profiler.cu ADDED
@@ -0,0 +1,1298 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+ */
34
+
35
+ #include <iostream>
36
+ #include <stdexcept>
37
+ #include <iomanip>
38
+ #include <ios>
39
+ #include <vector>
40
+
41
+ #include "cutlass/core_io.h"
42
+ #include <cuda_runtime_api.h>
43
+
44
+ #include "cutlass/profiler/cublas_helpers.h"
45
+ #include "cutlass/profiler/gemm_operation_profiler.h"
46
+ #include "cutlass/profiler/gpu_timer.h"
47
+ #include "cutlass/library/singleton.h"
48
+ #include "cutlass/library/library.h"
49
+ #include "cutlass/library/handle.h"
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ namespace cutlass {
53
+ namespace profiler {
54
+
55
+
56
+ /////////////////////////////////////////////////////////////////////////////////////////////////
57
+
58
+ /// Ctor
59
+ GemmOperationProfiler::GemmOperationProfiler(Options const &options):
60
+ OperationProfiler(
61
+ options,
62
+ library::OperationKind::kGemm,
63
+ {
64
+ {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (universal, gemm, planar_complex, planar_complex_array)"},
65
+ {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"},
66
+ {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"},
67
+ {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"},
68
+ {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
69
+ {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
70
+ {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
71
+ {ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D output"},
72
+ {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
73
+ {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
74
+ {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "Variant of split K mode(serial, parallel)"},
75
+ {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
76
+ {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of GEMMs computed in one batch"},
77
+ {ArgumentTypeID::kEnumerated, {"raster_order", "raster-order"}, "Raster order (heuristic, along_n, along_m)"},
78
+ {ArgumentTypeID::kInteger, {"swizzle_size", "swizzle-size"}, "Size to swizzle"},
79
+ },
80
+ { library::Provider::kCUBLAS}
81
+ ) {
82
+
83
+ description_ = " General matrix-matrix product. D = alpha * A*B + beta * C";
84
+ }
85
+
86
+ /// Destructor
87
+ GemmOperationProfiler::~GemmOperationProfiler() {
88
+
89
+ }
90
+
91
+ /// Prints usage statement for the math function
92
+ void GemmOperationProfiler::print_usage(std::ostream &out) const {
93
+ out << "GEMM" << "\n\n";
94
+
95
+ OperationProfiler::print_usage(out);
96
+ }
97
+
98
+ /// Prints examples
99
+ void GemmOperationProfiler::print_examples(std::ostream &out) const {
100
+
101
+ out << "\nExamples:\n\n"
102
+ << "Profile a particular problem size:\n"
103
+ << " $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n"
104
+
105
+ << "Schmoo over problem size and beta:\n"
106
+ << " $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
107
+
108
+ << "Schmoo over accumulator types:\n"
109
+ << " $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32\n\n"
110
+
111
+ << "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
112
+ << " $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n"
113
+
114
+ << "Profile a particular problem size with split K and parallel reduction:\n"
115
+ << " $ cutlass_profiler --operation=Gemm --split_k_mode=parallel --split_k_slices=2 --m=1024 --n=1024 --k=128\n\n"
116
+
117
+ << "Using various input value distribution:\n"
118
+ << " $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n"
119
+ << " $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3\n"
120
+ << " $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1\n\n"
121
+
122
+ << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
123
+ << " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
124
+
125
+ << "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n"
126
+ << " $ cutlass_profiler --operation=Gemm \\ \n"
127
+ << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
128
+ << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
129
+ << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
130
+ << " --beta=0,1,2 --profiling-iterations=1 \\ \n"
131
+ << " --providers=cutlass --output=functional-test.csv\n\n";
132
+ }
133
+
134
+ /////////////////////////////////////////////////////////////////////////////////////////////////
135
+
136
+ #if 0
137
+ // used this for debugging
138
+ static std::string byte_string(std::vector<uint8_t> const &bytes) {
139
+ std::stringstream ss;
140
+
141
+ ss << "0x";
142
+
143
+ for (size_t idx = bytes.size(); idx > 0; --idx) {
144
+ ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
145
+ }
146
+
147
+ return ss.str();
148
+ }
149
+ #endif
150
+
151
+ Status GemmOperationProfiler::GemmProblem::parse(
152
+ library::GemmDescription const &operation_desc,
153
+ ProblemSpace const &problem_space,
154
+ ProblemSpace::Problem const &problem) {
155
+
156
+ this->mode = library::GemmUniversalMode::kGemm;
157
+
158
+ if (!arg_as_int(this->m, "m", problem_space, problem)) {
159
+ // default value
160
+ this->m = 1024;
161
+ }
162
+
163
+ if (!arg_as_int(this->n, "n", problem_space, problem)) {
164
+ // default value
165
+ this->n = 1024;
166
+ }
167
+
168
+ if (!arg_as_int(this->k, "k", problem_space, problem)) {
169
+ // default value
170
+ this->k = 1024;
171
+ }
172
+
173
+ if (!arg_as_SplitKModeID(this->split_k_mode, "split_k_mode", problem_space, problem)) {
174
+ // default value
175
+ this->split_k_mode = library::SplitKMode::kSerial;
176
+ }
177
+
178
+ this->mode = library::GemmUniversalMode::kGemm;
179
+ if (this->split_k_mode == library::SplitKMode::kParallel) {
180
+ this->mode = library::GemmUniversalMode::kGemmSplitKParallel;
181
+ }
182
+
183
+ if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
184
+ // default value
185
+ this->split_k_slices = 1;
186
+ }
187
+
188
+ if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
189
+ // default value
190
+ this->batch_count = 1;
191
+ } else if (this->batch_count > 1) {
192
+ this->mode = library::GemmUniversalMode::kBatched;
193
+ }
194
+
195
+ if (!arg_as_int(this->swizzle_size, "swizzle_size", problem_space, problem)) {
196
+ // default value
197
+ this->swizzle_size = 1;
198
+ }
199
+ // Reject non-positive swizzle sizes, whether defaulted or user-provided.
200
+ if (this->swizzle_size <= 0) {
201
+ return Status::kErrorInvalidProblem;
+ }
202
+
203
+ if (!arg_as_RasterOrder(this->raster_order, "raster_order", problem_space, problem)) {
204
+ // default value
205
+ this->raster_order = library::RasterOrder::kHeuristic;
206
+ }
207
+
208
+ if (this->split_k_slices > 1 && this->batch_count > 1) {
209
+ // At least one of these must be one
210
+ return Status::kErrorInvalidProblem;
211
+ }
212
+
213
+ if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
214
+ return Status::kErrorInvalidProblem;
215
+ }
216
+
217
+ if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
218
+ return Status::kErrorInvalidProblem;
219
+ }
220
+
221
+ if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
222
+ return Status::kErrorInvalidProblem;
223
+ }
224
+
225
+ if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) {
226
+ return Status::kErrorInvalidProblem;
227
+ }
228
+
229
+ if (!arg_as_scalar(
230
+ this->alpha,
231
+ operation_desc.element_epilogue,
232
+ "alpha",
233
+ problem_space,
234
+ problem)) {
235
+
236
+ if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
237
+ return Status::kErrorInternal;
238
+ }
239
+ }
240
+
241
+ if (!arg_as_scalar(
242
+ this->beta,
243
+ operation_desc.element_epilogue,
244
+ "beta",
245
+ problem_space,
246
+ problem)) {
247
+
248
+ if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
249
+ return Status::kErrorInternal;
250
+ }
251
+ }
252
+
253
+ this->lda = DeviceAllocation::get_packed_layout(
254
+ operation_desc.A.layout, {int(this->m), int(this->k)}).front();
255
+
256
+ this->ldb = DeviceAllocation::get_packed_layout(
257
+ operation_desc.B.layout, {int(this->k), int(this->n)}).front();
258
+
259
+ this->ldc = DeviceAllocation::get_packed_layout(
260
+ operation_desc.C.layout, {int(this->m), int(this->n)}).front();
261
+
262
+ return Status::kSuccess;
263
+ }
264
+
265
+ /// Total number of bytes loaded
266
+ int64_t GemmOperationProfiler::GemmProblem::bytes(library::GemmDescription const &operation_desc) const {
267
+ // Input bytes read and Output bytes written for the gemm problem
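+ // A (m x k) and B (k x n) are each read once; the third term is the m x n output written as D.
+ // A further m x n read of C is added below when beta is non-zero.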
268
+ int64_t bytes =
269
+ int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * k +
270
+ int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k +
271
+ int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
272
+
273
+ // Set is_beta_zero true if beta is zero
274
+ bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
275
+
276
+ // Output bytes read for the gemm problem for non-zero beta values
277
+ if (!is_beta_zero) {
278
+ bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
279
+ }
280
+
281
+ bytes *= batch_count;
282
+
283
+ return bytes;
284
+ }
285
+
286
+ /// Total number of flops computed
287
+ int64_t GemmOperationProfiler::GemmProblem::flops(library::GemmDescription const &operation_desc) const {
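+ // Each multiply-accumulate counts as two flops: 2*m*n*k for the mainloop plus
+ // 2*m*n for the alpha/beta epilogue, all scaled by the batch count.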
288
+ int64_t flops_ = (int64_t(m) * n * k + m * n) * 2 * batch_count;
289
+
290
+ // complex-valued support
291
+ switch (operation_desc.tile_description.math_instruction.math_operation) {
292
+ case library::MathOperationID::kMultiplyAddComplex:
293
+ flops_ *= 4;
294
+ break;
295
+
296
+ case library::MathOperationID::kMultiplyAddComplexFastF32:
297
+ flops_ *= 4;
298
+ break;
299
+
300
+ case library::MathOperationID::kMultiplyAddGaussianComplex:
301
+ flops_ *= 3;
302
+ break;
303
+
304
+ default: break;
305
+ }
306
+
307
+ return flops_;
308
+ }
309
+
310
+
311
+ /// Initializes a performance result
312
+ void GemmOperationProfiler::GemmProblem::initialize_result(
313
+ PerformanceResult &result,
314
+ library::GemmDescription const &operation_desc,
315
+ ProblemSpace const &problem_space) {
316
+
317
+ result.arguments.resize(problem_space.rank());
318
+
319
+ set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind));
320
+
321
+ set_argument(result, "A", problem_space,
322
+ std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
323
+
324
+ set_argument(result, "B", problem_space,
325
+ std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
326
+
327
+ set_argument(result, "C", problem_space,
328
+ std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
329
+
330
+ set_argument(result, "D", problem_space,
331
+ std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout));
332
+
333
+ set_argument(result, "m", problem_space, m);
334
+ set_argument(result, "n", problem_space, n);
335
+ set_argument(result, "k", problem_space, k);
336
+
337
+ set_argument(result, "split_k_mode", problem_space, library::to_string(split_k_mode));
338
+ set_argument(result, "split_k_slices", problem_space, split_k_slices);
339
+ set_argument(result, "batch_count", problem_space, batch_count);
340
+ set_argument(result, "raster_order", problem_space, library::to_string(raster_order));
341
+ set_argument(result, "swizzle_size", problem_space, swizzle_size);
342
+
343
+ set_argument(result, "alpha", problem_space,
344
+ library::lexical_cast(alpha, operation_desc.element_epilogue));
345
+
346
+ set_argument(result, "beta", problem_space,
347
+ library::lexical_cast(beta, operation_desc.element_epilogue));
348
+ }
349
+
350
+ /////////////////////////////////////////////////////////////////////////////////////////////////
351
+
352
+ /// Extracts the problem dimensions
353
+ Status GemmOperationProfiler::initialize_configuration(
354
+ Options const &options,
355
+ PerformanceReport &report,
356
+ DeviceContext &device_context,
357
+ library::Operation const *operation,
358
+ ProblemSpace const &problem_space,
359
+ ProblemSpace::Problem const &problem) {
360
+
361
+ library::GemmDescription const &operation_desc =
362
+ static_cast<library::GemmDescription const &>(operation->description());
363
+
364
+ if (operation_desc.gemm_kind != library::GemmKind::kUniversal) {
365
+ return Status::kErrorInvalidProblem;
366
+ }
367
+
368
+ Status status = problem_.parse(operation_desc, problem_space, problem);
369
+
370
+ if (status != Status::kSuccess) {
371
+ return status;
372
+ }
373
+
374
+ gemm_workspace_.configuration.mode = problem_.mode;
375
+ gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
376
+ gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
377
+ gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
378
+ gemm_workspace_.configuration.lda = problem_.lda;
379
+ gemm_workspace_.configuration.ldb = problem_.ldb;
380
+ gemm_workspace_.configuration.ldc = problem_.ldc;
381
+ gemm_workspace_.configuration.ldd = problem_.ldc;
382
+
383
+ if (problem_.mode == library::GemmUniversalMode::kBatched) {
384
+ gemm_workspace_.configuration.batch_count = problem_.batch_count;
385
+ }
386
+ else {
387
+ gemm_workspace_.configuration.batch_count = problem_.split_k_slices;
388
+ }
389
+
390
+ gemm_workspace_.arguments.A = nullptr;
391
+ gemm_workspace_.arguments.B = nullptr;
392
+ gemm_workspace_.arguments.C = nullptr;
393
+ gemm_workspace_.arguments.D = nullptr;
394
+ gemm_workspace_.arguments.alpha = problem_.alpha.data();
395
+ gemm_workspace_.arguments.beta = problem_.beta.data();
396
+ gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
397
+ gemm_workspace_.arguments.swizzle_size = problem_.swizzle_size;
398
+ gemm_workspace_.arguments.raster_order = problem_.raster_order;
399
+ // initialize reduction operation for parallel splitKMode
400
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
401
+ if (!initialize_reduction_configuration_(operation, problem)) {
402
+ return Status::kErrorInternal;
403
+ }
404
+ }
405
+
406
+ initialize_result_(this->model_result_, options, operation_desc, problem_space);
407
+
408
+ return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments);
409
+ }
410
+
411
+ /// Initializes the performance result
412
+ void GemmOperationProfiler::initialize_result_(
413
+ PerformanceResult &result,
414
+ Options const &options,
415
+ library::GemmDescription const &operation_desc,
416
+ ProblemSpace const &problem_space) {
417
+
418
+ result.provider = library::Provider::kCUTLASS;
419
+ result.disposition = Disposition::kNotRun;
420
+ result.status = Status::kSuccess;
421
+ result.operation_name = operation_desc.name;
422
+
423
+ problem_.initialize_result(result, operation_desc, problem_space);
424
+
425
+ OperationProfiler::initialize_result_(result, operation_desc, problem_space);
426
+
427
+ result.bytes = problem_.bytes(operation_desc);
428
+ result.flops = problem_.flops(operation_desc);
429
+ result.runtime = 0;
430
+
431
+ }
432
+
433
+ /// Initialize reduction problem dimensions and library::Operation
434
+ bool GemmOperationProfiler::initialize_reduction_configuration_(
435
+ library::Operation const *operation,
436
+ ProblemSpace::Problem const &problem) {
437
+
438
+ library::GemmDescription const &gemm_desc =
439
+ static_cast<library::GemmDescription const&>(operation->description());
440
+
441
+ if (!cast_from_double(problem_.alpha_one, gemm_desc.element_epilogue, 1)) {
442
+ return false;
443
+ }
444
+
445
+ if (!cast_from_double(problem_.beta_zero, gemm_desc.element_epilogue, 0)) {
446
+ return false;
447
+ }
448
+
449
+ /// initialize library::ReductionConfiguration
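+ // The workspace holds split_k_slices partial m x n outputs; partition_stride is set to
+ // m*n, the size of one partial output tile.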
450
+ gemm_workspace_.reduction_configuration.problem_size = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn();
451
+ gemm_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices);
452
+ gemm_workspace_.reduction_configuration.partition_stride = gemm::GemmCoord(int(problem_.n), int(problem_.m), int(problem_.k)).mn().product();
453
+ gemm_workspace_.reduction_configuration.ldw = problem_.ldc;
454
+ gemm_workspace_.reduction_configuration.lds = problem_.ldc;
455
+ gemm_workspace_.reduction_configuration.ldd = problem_.ldc;
456
+
457
+ // find reduction operation
458
+ library::ReductionFunctionalKey reduction_key(
459
+ library::Provider::kCUTLASS,
460
+ gemm_desc.tile_description.math_instruction.element_accumulator, // element workspace
461
+ gemm_desc.tile_description.math_instruction.element_accumulator, // element accumulator
462
+ gemm_desc.D.element, // element output
463
+ gemm_desc.element_epilogue // element compute
464
+ );
465
+
466
+ auto reduction_it = library::Singleton::get().operation_table.reduction_operations.find(reduction_key);
467
+
468
+ if (reduction_it == library::Singleton::get().operation_table.reduction_operations.end()) {
469
+ return false;
470
+ }
471
+
472
+ // initialize reduction operation required for parallel split-k operator
473
+ reduction_op_ = reduction_it->second;
474
+
475
+ // reduction operation found and initialized
476
+ return true;
477
+ }
478
+
479
+ /// Initializes workspace
480
+ Status GemmOperationProfiler::initialize_workspace(
481
+ Options const &options,
482
+ PerformanceReport &report,
483
+ DeviceContext &device_context,
484
+ library::Operation const *operation,
485
+ ProblemSpace const &problem_space,
486
+ ProblemSpace::Problem const &problem) {
487
+
488
+ if (options.device.devices.size() != 1) {
489
+ throw std::runtime_error("This operation profiler only supports a single "
490
+ "device.");
491
+ }
492
+
493
+ cudaError_t result;
494
+ result = cudaSetDevice(options.device.device_id(0));
495
+ if (result != cudaSuccess) {
496
+ throw std::runtime_error("cudaSetDevice() failed.");
497
+ }
498
+
499
+ library::Operation const* underlying_operation = operation;
500
+
501
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
502
+ if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
503
+ return Status::kErrorNotSupported;
504
+ }
505
+ }
506
+
507
+ library::GemmDescription const &operation_desc =
508
+ static_cast<library::GemmDescription const &>(operation->description());
509
+
510
+ bool is_sparse = operation_desc.tile_description.math_instruction.opcode_class == cutlass::library::OpcodeClassID::kSparseTensorOp;
511
+
512
+ // Compute the number of copies of the problem to avoid L2 camping.
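+ // If one problem instance is smaller than ~3x the L2 cache, allocate multiple copies so that
+ // consecutive profiling iterations touch different data rather than hitting in L2.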
513
+ if (!options.profiling.workspace_count) {
514
+ int64_t bytes = problem_.bytes(operation_desc);
515
+ if (bytes < 3 * int64_t(options.device.properties[0].l2CacheSize)) {
516
+ gemm_workspace_.problem_count =
517
+ 1 + int((3 * int64_t(options.device.properties[0].l2CacheSize)) / bytes);
518
+ }
519
+ else {
520
+ gemm_workspace_.problem_count = 1;
521
+ }
522
+ }
523
+ else {
524
+ gemm_workspace_.problem_count = options.profiling.workspace_count;
525
+ }
526
+
527
+ bool allocate_device_tensors = options.execution_mode != ExecutionMode::kDryRun;
528
+ if (allocate_device_tensors) {
529
+ int seed_shift = 0;
530
+ gemm_workspace_.A = device_context.allocate_and_initialize_tensor(
531
+ options,
532
+ "A",
533
+ operation_desc.A.element,
534
+ operation_desc.A.layout,
535
+ {int(problem_.m), int(problem_.k)},
536
+ {int(problem_.lda)},
537
+ problem_.batch_count * gemm_workspace_.problem_count,
538
+ seed_shift++,
539
+ 0 // device_index
540
+ );
541
+
542
+ gemm_workspace_.B = device_context.allocate_and_initialize_tensor(
543
+ options,
544
+ "B",
545
+ operation_desc.B.element,
546
+ operation_desc.B.layout,
547
+ {int(problem_.k), int(problem_.n)},
548
+ {int(problem_.ldb)},
549
+ problem_.batch_count * gemm_workspace_.problem_count,
550
+ seed_shift++,
551
+ 0 // device_index
552
+ );
553
+
554
+ gemm_workspace_.C = device_context.allocate_and_initialize_tensor(
555
+ options,
556
+ "C",
557
+ operation_desc.C.element,
558
+ operation_desc.C.layout,
559
+ {int(problem_.m), int(problem_.n)},
560
+ {int(problem_.ldc)},
561
+ problem_.batch_count * gemm_workspace_.problem_count,
562
+ seed_shift++,
563
+ 0 // device_index
564
+ );
565
+
566
+ gemm_workspace_.Computed = device_context.allocate_tensor(
567
+ options,
568
+ "D",
569
+ operation_desc.D.element,
570
+ operation_desc.D.layout,
571
+ {int(problem_.m), int(problem_.n)},
572
+ {int(problem_.ldc)},
573
+ problem_.batch_count * gemm_workspace_.problem_count,
574
+ 0 // device_index
575
+ );
576
+
577
+ gemm_workspace_.Reference = device_context.allocate_tensor(
578
+ options,
579
+ "Reference",
580
+ operation_desc.D.element,
581
+ operation_desc.D.layout,
582
+ {int(problem_.m), int(problem_.n)},
583
+ {int(problem_.ldc)},
584
+ problem_.batch_count * gemm_workspace_.problem_count,
585
+ 0 // device_index
586
+ );
587
+ }
588
+
589
+ if (options.execution_mode != ExecutionMode::kDryRun) {
590
+ // NOTE: the leading non-batch strides are duplicated here for 3.0 API kernels
591
+ gemm_workspace_.arguments.problem_size = {int(problem_.m), int(problem_.n), int(problem_.k)};
592
+ gemm_workspace_.arguments.batch_count = problem_.batch_count;
593
+ gemm_workspace_.arguments.lda = problem_.lda;
594
+ gemm_workspace_.arguments.ldb = problem_.ldb;
595
+ gemm_workspace_.arguments.ldc = problem_.ldc;
596
+ gemm_workspace_.arguments.ldd = problem_.ldc;
597
+ gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
598
+ gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
599
+ gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
600
+ gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
601
+
602
+ /* Query device SM count to pass onto the kernel as an argument, where needed */
603
+ gemm_workspace_.arguments.sm_count = options.device.properties[0].multiProcessorCount;
604
+ }
605
+
606
+ //
607
+ // Initialize the CUTLASS operation
608
+ //
609
+ Status status = Status::kSuccess;
610
+
611
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
612
+
613
+ if (options.execution_mode != ExecutionMode::kDryRun) {
614
+ uint64_t workspace_size = underlying_operation->get_host_workspace_size(&gemm_workspace_.configuration);
615
+ gemm_workspace_.host_workspace.resize(workspace_size, 0);
616
+
617
+ workspace_size = underlying_operation->get_device_workspace_size(&gemm_workspace_.configuration,
618
+ &gemm_workspace_.arguments);
619
+ if (is_sparse) {
620
+ // For sparse GEMM, get_device_workspace_size() returns the device workspace size per iteration,
621
+ // so it must be multiplied by the number of problem copies (problem_count).
622
+ workspace_size *= gemm_workspace_.problem_count;
623
+ }
624
+ gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
625
+
626
+ // Convert to structure sparse contents here.
627
+ if (is_sparse) {
628
+ uint8_t* profiler_workspaces[1];
629
+ profiler_workspaces[0] = reinterpret_cast<uint8_t*>(gemm_workspace_.A->data());
630
+ // Sparse operations have a different initialize interface.
631
+ // initialize_with_profiler_workspace converts the m-by-k tensor A into the compressed m-by-(k/sparsity) tensor A and the metadata tensor E
632
+ auto modifiable_underlying_op = const_cast<library::Operation*>(underlying_operation);
633
+ status = modifiable_underlying_op->initialize_with_profiler_workspace(
634
+ &gemm_workspace_.configuration,
635
+ gemm_workspace_.host_workspace.data(),
636
+ gemm_workspace_.device_workspace.data(),
637
+ profiler_workspaces,
638
+ gemm_workspace_.problem_count);
639
+ }
640
+ else {
641
+ status = underlying_operation->initialize(
642
+ &gemm_workspace_.configuration,
643
+ gemm_workspace_.host_workspace.data(),
644
+ gemm_workspace_.device_workspace.data());
645
+ }
646
+
647
+ if (status != Status::kSuccess) {
648
+ return status;
649
+ }
650
+
651
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
652
+ workspace_size = reduction_op_->get_host_workspace_size(&gemm_workspace_.reduction_configuration);
653
+ gemm_workspace_.reduction_host_workspace.resize(workspace_size, 0);
654
+
655
+ status = reduction_op_->initialize(
656
+ &gemm_workspace_.reduction_configuration,
657
+ gemm_workspace_.reduction_host_workspace.data(),
658
+ nullptr);
659
+
660
+ if (status != Status::kSuccess) {
661
+ return status;
662
+ }
663
+ }
664
+ }
665
+
666
+ //
667
+ // If CUTLASS is enabled, generate a result for it
668
+ //
669
+ results_.push_back(model_result_);
670
+ results_.back().provider = library::Provider::kCUTLASS;
671
+ results_.back().op_kind = library::OperationKind::kGemm;
672
+ results_.back().disposition = Disposition::kNotRun;
673
+
674
+ for (auto provider : verification_providers_) {
675
+ results_.back().verification_map[provider] = Disposition::kNotRun;
676
+ }
677
+ }
678
+ return status;
679
+ }
680
+
681
+ /////////////////////////////////////////////////////////////////////////////////////////////////
682
+
683
+ /// Verifies CUTLASS against references
684
+ bool GemmOperationProfiler::verify_cutlass(
685
+ Options const &options,
686
+ PerformanceReport &report,
687
+ DeviceContext &device_context,
688
+ library::Operation const *operation,
689
+ ProblemSpace const &problem_space,
690
+ ProblemSpace::Problem const &problem) {
691
+
692
+ if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
693
+ return true;
694
+ }
695
+
696
+ if (options.execution_mode == ExecutionMode::kDryRun) {
697
+ return true;
698
+ }
699
+
700
+ // Initialize structure containing GEMM arguments
701
+ gemm_workspace_.arguments.A = gemm_workspace_.A->data();
702
+ gemm_workspace_.arguments.B = gemm_workspace_.B->data();
703
+ gemm_workspace_.arguments.C = gemm_workspace_.C->data();
704
+ gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
705
+ gemm_workspace_.arguments.alpha = problem_.alpha.data();
706
+ gemm_workspace_.arguments.beta = problem_.beta.data();
707
+ gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
708
+ gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
709
+ gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
710
+ gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
711
+ gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
712
+
713
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
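+ // In parallel split-K mode the GEMM writes partial sums to the device workspace with
+ // alpha=1 and beta=0; the reduction kernel run afterwards applies the user's alpha/beta
+ // while combining the partials with C into D.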
714
+ gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
715
+ gemm_workspace_.arguments.alpha = problem_.alpha_one.data();
716
+ gemm_workspace_.arguments.beta = problem_.beta_zero.data();
717
+
718
+ gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
719
+ gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data();
720
+ gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data();
721
+ gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data();
722
+ gemm_workspace_.reduction_arguments.beta = problem_.beta.data();
723
+ gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
724
+ }
725
+
726
+ //
727
+ // Run the CUTLASS operation
728
+ //
729
+
730
+ // Select the underlying GEMM operation used to handle parallel reduction
731
+ library::Operation const * underlying_operation = operation;
732
+
733
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
734
+ if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
735
+ results_.back().disposition = Disposition::kFailed;
736
+ return false;
737
+ }
738
+ }
739
+
740
+ results_.back().status = underlying_operation->run(
741
+ &gemm_workspace_.arguments,
742
+ gemm_workspace_.host_workspace.data(),
743
+ gemm_workspace_.device_workspace.data());
744
+
745
+ if (results_.back().status != Status::kSuccess) {
746
+ results_.back().disposition = Disposition::kFailed;
747
+ return false;
748
+ }
749
+
750
+ // Run parallel reduction kernel for parallel split_k_mode
751
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
752
+ results_.back().status = reduction_op_->run(
753
+ &gemm_workspace_.reduction_arguments,
754
+ gemm_workspace_.reduction_host_workspace.data(),
755
+ nullptr);
756
+
757
+ if (results_.back().status != Status::kSuccess) {
758
+ results_.back().disposition = Disposition::kFailed;
759
+ return false;
760
+ }
761
+ }
762
+
763
+ cudaError_t result = cudaDeviceSynchronize();
764
+ if (result != cudaSuccess) {
765
+ results_.back().disposition = Disposition::kFailed;
766
+ return false;
767
+ }
768
+
769
+ // CUTLASS op ran but is not yet verified against any verification provider
770
+ results_.back().disposition = Disposition::kNotVerified;
771
+
772
+ //
773
+ // Run verification providers
774
+ //
775
+
776
+ if (options.verification.enabled) {
777
+
778
+ #if CUTLASS_ENABLE_CUBLAS
779
+ if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
780
+
781
+ // Guard against unsupported cases
782
+ auto const & gemm_desc = static_cast<library::GemmDescription const &>(operation->description());
783
+
784
+ if (cublas_satisfies(gemm_desc) == Status::kSuccess) {
785
+
786
+ // call cublas verification if supported
787
+ verify_with_cublas_(
788
+ options,
789
+ report,
790
+ device_context,
791
+ operation,
792
+ problem_space,
793
+ problem);
794
+ }
795
+
796
+ else {
797
+ // set verification map for cublas to not supported
798
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
799
+ }
800
+ }
801
+ #endif // #if CUTLASS_ENABLE_CUBLAS
802
+
803
+ library::GemmDescription const &gemm_desc =
804
+ static_cast<library::GemmDescription const &>(operation->description());
805
+
806
+
807
+ cutlass::library::NumericTypeID element_A = gemm_desc.A.element;
808
+ cutlass::library::NumericTypeID element_B = gemm_desc.B.element;
809
+ bool verification_status = verify_with_reference_(options, report, device_context, operation, problem_space, problem, element_A, element_B);
810
+
811
+ // Update disposition to worst case verification outcome among all
812
+ // verification providers which are supported
813
+ bool is_any_verification_run_passed = false;
814
+ for (auto &m : results_.back().verification_map) {
815
+ if (m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
816
+ results_.back().disposition = m.second;
817
+ return true;
818
+ }
819
+ if (!is_any_verification_run_passed && m.second == Disposition::kPassed) {
820
+ is_any_verification_run_passed = true;
821
+ }
822
+ }
823
+
824
+ if (is_any_verification_run_passed) {
825
+ results_.back().disposition = Disposition::kPassed;
826
+ }
827
+ }
828
+
829
+ // if verification.required is set, then return success iff at least one ref-check was run
830
+ if (options.verification.required) {
831
+ bool did_any_verification_run = false;
832
+ for (auto provider : options.verification.providers) {
833
+ did_any_verification_run |= (Disposition::kNotRun != results_.back().verification_map[provider]);
834
+ }
835
+
836
+ if (not did_any_verification_run) {
837
+ results_.back().status = Status::kErrorNotSupported;
838
+ return false;
839
+ }
840
+ }
841
+
842
+ // Returning true means profiling should continue
843
+ return true;
844
+ }
845
+
846
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
847
+
848
+ /// Verifies CUTLASS against references
849
+ bool GemmOperationProfiler::verify_with_cublas_(
850
+ Options const &options,
851
+ PerformanceReport &report,
852
+ DeviceContext &device_context,
853
+ library::Operation const *operation,
854
+ ProblemSpace const &problem_space,
855
+ ProblemSpace::Problem const &problem) {
856
+
857
+ #if CUTLASS_ENABLE_CUBLAS
858
+
859
+ library::GemmDescription const &gemm_desc =
860
+ static_cast<library::GemmDescription const &>(operation->description());
861
+
862
+ //
863
+ // Construct cuBLAS operators
864
+ //
865
+
866
+ CublasLtCreate handle;
867
+ cublasStatus_t status = handle.get_cublaslt_create_status();
868
+
869
+ if (status != CUBLAS_STATUS_SUCCESS) {
870
+ results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status);
871
+ return true;
872
+ }
873
+
874
+
875
+ //
876
+ // Initialize state
877
+ //
878
+
879
+ try {
880
+
881
+ //
882
+ // Construct dispatcher to the cublasLt GEMM API
883
+ //
884
+
885
+ // Initialize structure containing GEMM arguments
886
+ gemm_workspace_.arguments.A = gemm_workspace_.A->data();
887
+ gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
888
+ gemm_workspace_.arguments.B = gemm_workspace_.B->data();
889
+ gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
890
+ gemm_workspace_.arguments.C = gemm_workspace_.Reference->data();
891
+ gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.Reference->batch_stride();
892
+ gemm_workspace_.arguments.D = gemm_workspace_.Reference->data();
893
+ gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Reference->batch_stride();
894
+ gemm_workspace_.arguments.alpha = problem_.alpha.data();
895
+ gemm_workspace_.arguments.beta = problem_.beta.data();
896
+ gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
897
+
898
+ detail::cublasLtGemmExDispatcher gemm_op(
899
+ gemm_desc,
900
+ gemm_workspace_.configuration,
901
+ gemm_workspace_.arguments
902
+ );
903
+
904
+ gemm_op.initialize_cublaslt();
905
+
906
+ if(!gemm_op.get_cublaslt_algo(handle, AlgorithmMode::kDefault)){
907
+ return true;
908
+ }
909
+
910
+ if (gemm_op.status != Status::kSuccess) {
911
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
912
+ return true;
913
+ }
914
+
915
+ status = gemm_op(handle);
916
+
917
+ // Handle errors
918
+ if (status != CUBLAS_STATUS_SUCCESS) {
919
+ std::cerr << "cublasLt verification run failed with status: " << cublasLtGetStatusName(status) << "\n";
920
+ results_.back().verification_map[library::Provider::kCUBLAS] = get_cutlass_disposition(status);
921
+ return true;
922
+ }
923
+
924
+ results_.back().status = Status::kSuccess;
925
+
926
+ //
927
+ // Verify results
928
+ //
929
+
930
+ results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
931
+ options,
932
+ *gemm_workspace_.Computed,
933
+ *gemm_workspace_.Reference,
934
+ gemm_workspace_.Computed->batch_stride()
935
+ );
936
+
937
+ // Save workspace if incorrect
938
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
939
+ results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
940
+
941
+ save_workspace(
942
+ device_context,
943
+ options,
944
+ gemm_desc,
945
+ library::Provider::kCUTLASS,
946
+ library::Provider::kCUBLAS);
947
+ }
948
+ }
949
+ catch (...) {
950
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
951
+ }
952
+
953
+ #endif
954
+
955
+ // Returning true means profiling should continue
956
+ return true;
957
+ }
958
+
959
+ /////////////////////////////////////////////////////////////////////////////////////////////////
960
+
961
+ /// Verifies CUTLASS against host and device references
962
+ bool GemmOperationProfiler::verify_with_reference_(
963
+ Options const &options,
964
+ PerformanceReport &report,
965
+ DeviceContext &device_context,
966
+ library::Operation const *operation,
967
+ ProblemSpace const &problem_space,
968
+ ProblemSpace::Problem const &problem,
969
+ cutlass::library::NumericTypeID element_A,
970
+ cutlass::library::NumericTypeID element_B)
971
+ {
972
+ library::GemmDescription const &gemm_desc =
973
+ static_cast<library::GemmDescription const &>(operation->description());
974
+
975
+ //
976
+ // Initialize state
977
+ //
978
+
979
+ for (auto provider : options.verification.providers) {
980
+
981
+ // Skip providers that are not enabled
982
+ if (!options.verification.provider_enabled(provider)) {
983
+ continue;
984
+ }
985
+
986
+ void *ptr_A = gemm_workspace_.A->data();
987
+ void *ptr_B = gemm_workspace_.B->data();
988
+ void *ptr_C = gemm_workspace_.C->data();
989
+ void *ptr_D = gemm_workspace_.Reference->data();
990
+
991
+ // To support the host-side reference, conditionally allocate and
992
+ // copy tensors to host memory.
993
+ std::vector<uint8_t> host_data_A;
994
+ std::vector<uint8_t> host_data_B;
995
+ std::vector<uint8_t> host_data_C;
996
+ std::vector<uint8_t> host_data_D;
997
+
998
+ if (provider == library::Provider::kReferenceHost) {
999
+
1000
+ host_data_A.resize(gemm_workspace_.A->bytes());
1001
+ ptr_A = host_data_A.data();
1002
+ gemm_workspace_.A->copy_to_host(ptr_A);
1003
+
1004
+ host_data_B.resize(gemm_workspace_.B->bytes());
1005
+ ptr_B = host_data_B.data();
1006
+ gemm_workspace_.B->copy_to_host(ptr_B);
1007
+
1008
+ host_data_C.resize(gemm_workspace_.C->bytes());
1009
+ ptr_C = host_data_C.data();
1010
+ gemm_workspace_.C->copy_to_host(ptr_C);
1011
+
1012
+ host_data_D.resize(gemm_workspace_.Reference->bytes());
1013
+ ptr_D = host_data_D.data();
1014
+ }
1015
+
1016
+ //
1017
+ // Launch
1018
+ //
1019
+
1020
+ library::Handle handle;
1021
+
1022
+ handle.set_provider(provider);
1023
+
1024
+ Status status = handle.gemm_universal(
1025
+ problem_.mode,
1026
+ gemm_workspace_.configuration.problem_size.m(),
1027
+ gemm_workspace_.configuration.problem_size.n(),
1028
+ gemm_workspace_.configuration.problem_size.k(),
1029
+ gemm_desc.tile_description.math_instruction.element_accumulator,
1030
+ gemm_desc.element_epilogue,
1031
+
1032
+ problem_.alpha.data(),
1033
+
1034
+ element_A,
1035
+ gemm_desc.A.layout,
1036
+ gemm_desc.transform_A,
1037
+ ptr_A,
1038
+ int(gemm_workspace_.configuration.lda),
1039
+
1040
+ element_B,
1041
+ gemm_desc.B.layout,
1042
+ gemm_desc.transform_B,
1043
+ ptr_B,
1044
+ int(gemm_workspace_.configuration.ldb),
1045
+
1046
+ problem_.beta.data(),
1047
+
1048
+ gemm_desc.C.element,
1049
+ gemm_desc.C.layout,
1050
+ ptr_C,
1051
+ int(gemm_workspace_.configuration.ldc),
1052
+
1053
+ gemm_desc.D.element,
1054
+ gemm_desc.D.layout,
1055
+ ptr_D,
1056
+ int(gemm_workspace_.configuration.ldd),
1057
+
1058
+ gemm_workspace_.configuration.batch_count,
1059
+ gemm_workspace_.A->batch_stride(),
1060
+ gemm_workspace_.B->batch_stride(),
1061
+ gemm_workspace_.C->batch_stride(),
1062
+ gemm_workspace_.Reference->batch_stride());
1063
+
1064
+ if (status != Status::kSuccess) {
1065
+ results_.back().verification_map[provider] = Disposition::kNotRun;
1066
+ continue;
1067
+ }
1068
+ results_.back().status = status;
1069
+
1070
+ if (provider == library::Provider::kReferenceHost) {
1071
+ gemm_workspace_.Reference->copy_from_host(ptr_D);
1072
+ }
1073
+
1074
+ //
1075
+ // Verify results
1076
+ //
1077
+
1078
+ results_.back().verification_map[provider] = compare_tensors(
1079
+ options,
1080
+ *gemm_workspace_.Computed,
1081
+ *gemm_workspace_.Reference,
1082
+ gemm_workspace_.Computed->batch_stride()
1083
+ );
1084
+
1085
+ // Save workspace if incorrect
1086
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
1087
+ results_.back().verification_map[provider] == Disposition::kIncorrect) {
1088
+
1089
+ save_workspace(
1090
+ device_context,
1091
+ options,
1092
+ gemm_desc,
1093
+ library::Provider::kCUTLASS,
1094
+ provider);
1095
+ }
1096
+ }
1097
+
1098
+ return true;
1099
+ }
1100
+
1101
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1102
+
1103
+ /// Measures performance results
1104
+ bool GemmOperationProfiler::profile(
1105
+ Options const &options,
1106
+ PerformanceReport &report,
1107
+ DeviceContext &device_context,
1108
+ library::Operation const *operation,
1109
+ ProblemSpace const &problem_space,
1110
+ ProblemSpace::Problem const &problem) {
1111
+
1112
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
1113
+
1114
+ // Initialize structure containing GEMM arguments
1115
+ gemm_workspace_.arguments.A = gemm_workspace_.A->data();
1116
+ gemm_workspace_.arguments.B = gemm_workspace_.B->data();
1117
+ gemm_workspace_.arguments.C = gemm_workspace_.C->data();
1118
+ gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
1119
+ gemm_workspace_.arguments.alpha = problem_.alpha.data();
1120
+ gemm_workspace_.arguments.beta = problem_.beta.data();
1121
+ gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
1122
+ gemm_workspace_.arguments.batch_stride_A = gemm_workspace_.A->batch_stride();
1123
+ gemm_workspace_.arguments.batch_stride_B = gemm_workspace_.B->batch_stride();
1124
+ gemm_workspace_.arguments.batch_stride_C = gemm_workspace_.C->batch_stride();
1125
+ gemm_workspace_.arguments.batch_stride_D = gemm_workspace_.Computed->batch_stride();
1126
+
1127
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
1128
+ gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
1129
+ gemm_workspace_.arguments.alpha = problem_.alpha_one.data();
1130
+ gemm_workspace_.arguments.beta = problem_.beta_zero.data();
1131
+
1132
+ gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
1133
+ gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->data();
1134
+ gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->data();
1135
+ gemm_workspace_.reduction_arguments.alpha = problem_.alpha.data();
1136
+ gemm_workspace_.reduction_arguments.beta = problem_.beta.data();
1137
+ gemm_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost;
1138
+ }
1139
+
1140
+ results_.back().status = profile_cutlass_(
1141
+ results_.back().runtime,
1142
+ options,
1143
+ operation,
1144
+ &gemm_workspace_.arguments,
1145
+ gemm_workspace_.host_workspace.data(),
1146
+ gemm_workspace_.device_workspace.data()
1147
+ );
1148
+ }
1149
+ return true;
1150
+ }
1151
+
1152
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1153
+
1154
+ /// Method to profile a CUTLASS Operation
1155
+ Status GemmOperationProfiler::profile_cutlass_(
1156
+ double &runtime,
1157
+ Options const &options,
1158
+ library::Operation const *operation,
1159
+ void *arguments,
1160
+ void *host_workspace,
1161
+ void *device_workspace) {
1162
+
1163
+ GpuTimer timer;
1164
+ // Select the underlying GEMM operation used to handle parallel reduction
1165
+ library::Operation const * underlying_operation = operation;
1166
+
1167
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
1168
+ if (!(underlying_operation = library::find_gemm_operation_for_parallel_reduction(operation))) {
1169
+ return Status::kErrorNotSupported;
1170
+ }
1171
+ }
1172
+
1173
+ //
1174
+ // Optional sleep to limit power consumption and thermals
1175
+ //
1176
+
1177
+ sleep(options.profiling.sleep_duration);
1178
+
1179
+ //
1180
+ // Warmup loop
1181
+ //
1182
+
1183
+ Status status;
1184
+
1185
+ for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) {
1186
+
1187
+ int problem_idx = (iteration % gemm_workspace_.problem_count) * problem_.batch_count;
1188
+
1189
+ gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx);
1190
+ gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx);
1191
+ gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx);
1192
+ gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx);
1193
+
1194
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
1195
+ gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
1196
+
1197
+ gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
1198
+ gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx);
1199
+ gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx);
1200
+ }
1201
+
1202
+ // Execute the CUTLASS operation
1203
+ status = underlying_operation->run(
1204
+ &gemm_workspace_.arguments,
1205
+ host_workspace,
1206
+ device_workspace);
1207
+
1208
+ if (status != Status::kSuccess) {
1209
+ return status;
1210
+ }
1211
+
1212
+ // Run parallel reduction kernel for parallel split_k_mode
1213
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
1214
+ status = reduction_op_->run(
1215
+ &gemm_workspace_.reduction_arguments,
1216
+ gemm_workspace_.reduction_host_workspace.data(),
1217
+ nullptr);
1218
+
1219
+ if (status != Status::kSuccess) {
1220
+ return status;
1221
+ }
1222
+ }
1223
+ }
1224
+
1225
+ //
1226
+ // Initialize GPU timer
1227
+ //
1228
+
1229
+ timer.start();
1230
+
1231
+ //
1232
+ // Profiling loop
1233
+ //
1234
+
1235
+ int Iterations = options.profiling.iterations;
1236
+
1237
+ int iteration = 0;
1238
+ for (; iteration < Iterations; ++iteration) {
1239
+
1240
+ // Iterate over copies of the problem in memory
1241
+ int workspace_idx = options.profiling.warmup_iterations + iteration;
1242
+ int problem_idx = (workspace_idx % gemm_workspace_.problem_count) * problem_.batch_count;
1243
+
1244
+ gemm_workspace_.arguments.A = gemm_workspace_.A->batch_data(problem_idx);
1245
+ gemm_workspace_.arguments.B = gemm_workspace_.B->batch_data(problem_idx);
1246
+ gemm_workspace_.arguments.C = gemm_workspace_.C->batch_data(problem_idx);
1247
+ gemm_workspace_.arguments.D = gemm_workspace_.Computed->batch_data(problem_idx);
1248
+
1249
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
1250
+ gemm_workspace_.arguments.D = gemm_workspace_.device_workspace.data();
1251
+
1252
+ gemm_workspace_.reduction_arguments.workspace = gemm_workspace_.device_workspace.data();
1253
+ gemm_workspace_.reduction_arguments.source = gemm_workspace_.C->batch_data(problem_idx);
1254
+ gemm_workspace_.reduction_arguments.destination = gemm_workspace_.Computed->batch_data(problem_idx);
1255
+ }
1256
+
1257
+ status = underlying_operation->run(
1258
+ arguments,
1259
+ host_workspace,
1260
+ device_workspace);
1261
+
1262
+ if (status != Status::kSuccess) {
1263
+ return status;
1264
+ }
1265
+
1266
+ // Run parallel reduction kernel for parallel split_k_mode
1267
+ if (problem_.split_k_mode == library::SplitKMode::kParallel) {
1268
+ status = reduction_op_->run(
1269
+ &gemm_workspace_.reduction_arguments,
1270
+ gemm_workspace_.reduction_host_workspace.data(),
1271
+ nullptr);
1272
+
1273
+ if (status != Status::kSuccess) {
1274
+ return status;
1275
+ }
1276
+ }
1277
+ }
1278
+
1279
+ //
1280
+ // Wait for completion
1281
+ //
1282
+
1283
+ timer.stop_and_wait();
1284
+ //
1285
+ // Update performance result
1286
+ //
1287
+
1288
+ runtime = timer.duration(iteration);
1289
+
1290
+ return status;
1291
+ }
1292
+
1293
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1294
+
1295
+ } // namespace profiler
1296
+ } // namespace cutlass
1297
+
1298
+ /////////////////////////////////////////////////////////////////////////////////////////////////
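For orientation, the verification and profiling entry points added above share one signature and are driven in sequence by the profiler harness: verify_cutlass() runs the operation once, checks it against cuBLAS and the reference providers, and returns true whenever profiling may proceed (even if verification was skipped or unsupported), while profile() then times repeated launches through profile_cutlass_(). A minimal, hypothetical driver sketch follows; the profiler, options, report, ctx, op, space, and prob objects are assumed to be constructed elsewhere and are not defined in this file.

// Hypothetical driver sketch (not part of this commit): intended call order of the methods above.
if (profiler.verify_cutlass(options, report, ctx, op, space, prob)) {
  // profile() fills results_.back().runtime by timing repeated runs inside profile_cutlass_().
  profiler.profile(options, report, ctx, op, space, prob);
}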
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/gpu_timer.cpp ADDED
@@ -0,0 +1,113 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Defines the CUDA-event-based GPU timer used by the profiler
33
+ */
34
+
35
+ #include <stdexcept>
36
+
37
+ #include "cutlass/profiler/gpu_timer.h"
38
+
39
+ namespace cutlass {
40
+ namespace profiler {
41
+
42
+ /////////////////////////////////////////////////////////////////////////////////////////////////
43
+
44
+ GpuTimer::GpuTimer() {
45
+ cudaError_t result;
46
+
47
+ for (auto & event : events) {
48
+ result = cudaEventCreate(&event);
49
+ if (result != cudaSuccess) {
50
+ throw std::runtime_error("Failed to create CUDA event");
51
+ }
52
+ }
53
+ }
54
+
55
+ GpuTimer::~GpuTimer() {
56
+ for (auto & event : events) {
57
+ cudaEventDestroy(event);
58
+ }
59
+ }
60
+
61
+ /// Records a start event in the stream
62
+ void GpuTimer::start(cudaStream_t stream) {
63
+ cudaError_t result = cudaEventRecord(events[0], stream);
64
+ if (result != cudaSuccess) {
65
+ throw std::runtime_error("Failed to record start event.");
66
+ }
67
+ }
68
+
69
+ /// Records a stop event in the stream
70
+ void GpuTimer::stop(cudaStream_t stream) {
71
+ cudaError_t result = cudaEventRecord(events[1], stream);
72
+ if (result != cudaSuccess) {
73
+ throw std::runtime_error("Failed to record stop event.");
74
+ }
75
+ }
76
+
77
+ /// Records a stop event in the stream and synchronizes on the stream
78
+ void GpuTimer::stop_and_wait(cudaStream_t stream) {
79
+
80
+ stop(stream);
81
+
82
+ cudaError_t result;
83
+ if (stream) {
84
+ result = cudaStreamSynchronize(stream);
85
+ if (result != cudaSuccess) {
86
+ throw std::runtime_error("Failed to synchronize with non-null CUDA stream.");
87
+ }
88
+ }
89
+ else {
90
+ result = cudaDeviceSynchronize();
91
+ if (result != cudaSuccess) {
92
+ throw std::runtime_error("Failed to synchronize with CUDA device.");
93
+ }
94
+ }
95
+ }
96
+
97
+ /// Returns the duration in milliseconds
98
+ double GpuTimer::duration(int iterations) const {
99
+
100
+ float avg_ms;
101
+
102
+ cudaError_t result = cudaEventElapsedTime(&avg_ms, events[0], events[1]);
103
+ if (result != cudaSuccess) {
104
+ throw std::runtime_error("Failed to query elapsed time from CUDA events.");
105
+ }
106
+
107
+ return double(avg_ms) / double(iterations);
108
+ }
109
+
110
+ /////////////////////////////////////////////////////////////////////////////////////////////////
111
+
112
+ } // namespace profiler
113
+ } // namespace cutlass
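The timer above records one start/stop event pair and divides the elapsed time by the iteration count, which is exactly how profile_cutlass_() uses it earlier in this commit: start() before the profiling loop, stop_and_wait() after it, then duration(iterations). A minimal usage sketch, assuming some_kernel_launch() is only a placeholder for the work being timed:

#include "cutlass/profiler/gpu_timer.h"

// Minimal sketch; some_kernel_launch() is a placeholder, not a real API.
double average_ms(int iterations) {
  cutlass::profiler::GpuTimer timer;
  timer.start();                         // records events[0] on the default stream
  for (int i = 0; i < iterations; ++i) {
    // some_kernel_launch();             // work being timed
  }
  timer.stop_and_wait();                 // records events[1] and synchronizes
  return timer.duration(iterations);     // elapsed milliseconds divided by iteration count
}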
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/options.cu ADDED
@@ -0,0 +1,899 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Command line options for performance test program
33
+ */
34
+
35
+ #include <algorithm>
36
+ #include <set>
37
+
38
+ #include "cutlass/cutlass.h"
39
+ #include "cutlass/version.h"
40
+
41
+ #include "cutlass/library/util.h"
42
+
43
+ #include "cutlass/profiler/options.h"
44
+
45
+ /////////////////////////////////////////////////////////////////////////////////////////////////
46
+
47
+ namespace cutlass {
48
+ namespace profiler {
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ /// Newline and indent for help strings
53
+ static char const *end_of_line = "\n ";
54
+
55
+ /////////////////////////////////////////////////////////////////////////////////////////////////
56
+
57
+ Options::Device::Device(cutlass::CommandLine const &cmdline) {
58
+
59
+ // Gets the number of devices for future validation
60
+ cudaError_t result;
61
+ result = cudaGetDeviceCount(&num_devices);
62
+ if (result != cudaSuccess) {
63
+ throw std::runtime_error("cudaGetDeviceCount() failed");
64
+ }
65
+
66
+ // Gets the devices specified by the user
67
+ // This preserves the user specified order and checks for duplicates
68
+ {
69
+ std::vector<int> temp_device_list;
70
+ cmdline.get_cmd_line_arguments("devices", temp_device_list);
71
+ if (temp_device_list.empty()) {
72
+ temp_device_list.push_back(0);
73
+ }
74
+ {
75
+ std::set<int> temp_device_set;
76
+ for (int device : temp_device_list) {
77
+ auto res = temp_device_set.insert(device);
78
+ if (!res.second) {
79
+ throw std::runtime_error("Duplicate device specified: " +
80
+ std::to_string(device));
81
+ } else if (device >= num_devices) {
82
+ throw std::runtime_error("Bad device ID: " +
83
+ std::to_string(device));
84
+ } else {
85
+ devices.push_back(device);
86
+ }
87
+ }
88
+ }
89
+ }
90
+
91
+ properties.resize(devices.size());
92
+ // Retrieves properties for all specified devices
93
+ for (size_t device_index = 0; device_index < devices.size(); device_index++) {
94
+ int device = devices[device_index];
95
+
96
+ result = cudaGetDeviceProperties(&properties[device_index], device);
97
+
98
+ if (result != cudaSuccess) {
99
+ throw std::runtime_error("cudaGetDeviceProperties() failed for given device");
100
+ }
101
+
102
+ // Check that all devices are the same
103
+ if (device_index > 0) {
104
+ if ((properties[device_index].major != properties[0].major) ||
105
+ (properties[device_index].minor != properties[0].minor)) {
106
+ throw std::runtime_error("All selected devices must have the same "
107
+ "compute capability");
108
+ }
109
+ if (properties[device_index].l2CacheSize != properties[0].l2CacheSize) {
110
+ throw std::runtime_error("All selected devices must have the same "
111
+ "L2 cache size");
112
+ }
113
+ if (properties[device_index].multiProcessorCount != properties[0].multiProcessorCount) {
114
+ throw std::runtime_error("All selected devices must have the same "
115
+ "SM count");
116
+ }
117
+ }
118
+
119
+ result = cudaSetDevice(device);
120
+ if (result != cudaSuccess) {
121
+ throw std::runtime_error("cudaSetDevice() failed for given device.");
122
+ }
123
+
124
+ // Permit overriding the compute capability
125
+ if (cmdline.check_cmd_line_flag("compute-capability")) {
126
+ int cc = compute_capability(device_index);
127
+ cmdline.get_cmd_line_argument("compute-capability", cc, cc);
128
+ properties[device_index].major = cc / 10;
129
+ properties[device_index].minor = cc % 10;
130
+ }
131
+
132
+ // Permit overriding the L2 cache capacity
133
+ if (cmdline.check_cmd_line_flag("llc-capacity")) {
134
+ int llc_capacity = 0;
135
+ cmdline.get_cmd_line_argument("llc-capacity", llc_capacity, 0);
136
+
137
+ if (llc_capacity >= 0) {
138
+ properties[device_index].l2CacheSize = (llc_capacity << 10);
139
+ }
140
+ }
141
+
142
+ }
143
+ }
144
+
145
+ void Options::Device::print_usage(std::ostream &out) const {
146
+
147
+ out << "Device:\n"
148
+ << " --devices=<int>,<int>,... "
149
+ << " CUDA Device IDs\n\n";
150
+
151
+ int device_count = 0;
152
+ cudaError_t result = cudaGetDeviceCount(&device_count);
153
+
154
+ if (result != cudaSuccess) {
155
+ out << " <could not query for CUDA devices>\n";
156
+ }
157
+ else {
158
+
159
+ for (int idx = 0; idx < device_count; ++idx) {
160
+ cudaDeviceProp prop;
161
+ result = cudaGetDeviceProperties(&prop, idx);
162
+ if (result != cudaSuccess) {
163
+ out << " <could not obtain device properties for device " << idx << ">" << std::endl;
164
+ break;
165
+ }
166
+ else {
167
+ out << " [" << idx << "] - "
168
+ << prop.name << " - SM " << prop.major << "." << prop.minor << ", "
169
+ << prop.multiProcessorCount << " SMs @ " << (prop.clockRate / 1000.0) << " MHz, "
170
+ << "L2 cache: " << (prop.l2CacheSize >> 20) << " MB, Global Memory: " << (prop.totalGlobalMem >> 30) << " GB"
171
+ << std::endl;
172
+ }
173
+ }
174
+ out << "\n";
175
+ }
176
+
177
+ out
178
+ << " --compute-capability=<int> "
179
+ << " Override the compute capability.\n\n"
180
+
181
+ << " --llc-capacity=<capacity in KiB> "
182
+ << " Capacity of last-level cache in kilobytes. If this is non-zero," << end_of_line
183
+ << " profiling phases cycle through different input tensors to induce" << end_of_line
184
+ << " capacity misses in the L2.\n\n";
185
+
186
+ }
187
+
188
+ void Options::Device::print_device_info(std::ostream &out) const {
189
+ cudaDeviceProp props;
190
+ cudaError_t result;
191
+
192
+ out << "Device Name,SM,CUDA Device ID,Phy Device ID" << std::endl;
193
+
194
+ for (int device = 0; device < num_devices; device++) {
195
+ result = cudaSetDevice(device);
196
+ if (result != cudaSuccess) {
197
+ throw std::runtime_error("cudaSetDevice() failed for device");
198
+ }
199
+
200
+ result = cudaGetDeviceProperties(&props, device);
201
+ if (result != cudaSuccess) {
202
+ throw std::runtime_error("cudaGetDeviceProperties failed for device");
203
+ }
204
+
205
+ out << props.name << "," << props.major << props.minor << ","
206
+ << device << "," << props.multiGpuBoardGroupID << std::endl;
207
+
208
+ }
209
+ }
210
+
211
+ void Options::Device::print_options(std::ostream &out, int indent) const {
212
+
213
+ out
214
+ << indent_str(indent) << "devices: ";
215
+ for (int device : devices) {
216
+ out << device << ',';
217
+ }
218
+ out
219
+ << "\n"
220
+ << indent_str(indent) << "clock: " << int(double(properties[0].clockRate) / 1000.0) << "\n"
221
+ << indent_str(indent) << "compute-capability: " << compute_capability(0) << "\n";
222
+ }
223
+
224
+ /// Returns the device ID from a device index
225
+ int Options::Device::device_id(size_t device_index) const {
226
+ if (device_index > devices.size()) {
227
+ throw std::runtime_error("Out of bounds device index: " +
228
+ std::to_string(device_index));
229
+ }
230
+ return devices.at(device_index);
231
+ }
232
+
233
+ /// Returns the compute capability of the listed device (e.g. 61, 60, 70, 75)
234
+ int Options::Device::compute_capability(int device_index) const {
235
+ return properties[device_index].major * 10 + properties[device_index].minor;
236
+ }
237
+
238
+ /////////////////////////////////////////////////////////////////////////////////////////////////
239
+
240
+ Options::Initialization::Initialization(cutlass::CommandLine const &cmdline) {
241
+
242
+ cmdline.get_cmd_line_argument("initialization-enabled", enabled, true);
243
+
244
+ if (cmdline.check_cmd_line_flag("initialization-provider")) {
245
+ std::string str;
246
+ cmdline.get_cmd_line_argument("initialization-provider", str);
247
+ provider = library::from_string<library::Provider>(str);
248
+ if (provider == library::Provider::kInvalid) {
249
+ enabled = false;
250
+ }
251
+ else if (provider != library::Provider::kReferenceHost && provider != library::Provider::kReferenceDevice) {
252
+ throw std::runtime_error("Unsupported initialization provider specified.");
253
+ }
254
+ }
255
+ else {
256
+ provider = library::Provider::kReferenceDevice;
257
+ }
258
+
259
+ cmdline.get_cmd_line_argument("seed", seed, 2019);
260
+
261
+ if (cmdline.check_cmd_line_flag("dist")) {
262
+ // user has set the data distribution (fix data distribution once set)
263
+ fix_data_distribution = true;
264
+ // set user provided data distribution
265
+ get_distribution(cmdline, "dist", data_distribution);
266
+ }
267
+ else {
268
+ // profiler chosen data distribution (allowed to change based on numeric types)
269
+ fix_data_distribution = false;
270
+ // set uniform data distribution with range [-4, 4]
271
+ data_distribution.set_uniform(-4, 4, 0);
272
+ }
273
+
274
+
275
+ }
276
+
277
+ /// Gets the initial distribution
278
+ void Options::Initialization::get_distribution(
279
+ cutlass::CommandLine const &args,
280
+ std::string const &arg,
281
+ cutlass::Distribution &dist) {
282
+
283
+ struct {
284
+ const char *label;
285
+ cutlass::Distribution::Kind kind;
286
+ } distribution_kinds[] = {
287
+ {"uniform", cutlass::Distribution::Uniform},
288
+ {"gaussian", cutlass::Distribution::Gaussian},
289
+ {"identity", cutlass::Distribution::Identity},
290
+ {"sequential", cutlass::Distribution::Sequential},
291
+ {0, cutlass::Distribution::Invalid}
292
+ };
293
+
294
+ struct {
295
+ char const *label;
296
+ double *member;
297
+ } members[] = {
298
+ {"min", &dist.uniform.min},
299
+ {"max", &dist.uniform.max},
300
+ {"mean", &dist.gaussian.mean},
301
+ {"stddev", &dist.gaussian.stddev},
302
+ {"pnzA", &dist.gaussian.pnzA},
303
+ {"pnzB", &dist.gaussian.pnzB},
304
+ {"pnzC", &dist.gaussian.pnzC},
305
+ {"start", &dist.sequential.start},
306
+ {"delta", &dist.sequential.delta},
307
+ {0, 0}
308
+ };
309
+
310
+ // Initialize pnz values to a default value of 100%
311
+ dist.gaussian.pnz = 1.0;
312
+ dist.gaussian.pnzA = 1.0;
313
+ dist.gaussian.pnzB = 1.0;
314
+ dist.gaussian.pnzC = 1.0;
315
+
316
+ using KeyValueVector = std::vector<std::pair<std::string, std::string> >;
317
+
318
+ KeyValueVector values;
319
+ args.get_cmd_line_argument_pairs(arg.c_str(), values);
320
+
321
+ // The parser expects the first token to be a string identifying the distribution type.
322
+ auto it = values.begin();
323
+ if (it != values.end()) {
324
+ for (int i = 0; distribution_kinds[i].label; ++i) {
325
+ if (it->first == distribution_kinds[i].label) {
326
+ dist.kind = distribution_kinds[i].kind;
327
+ break;
328
+ }
329
+ }
330
+ ++it;
331
+ }
332
+
333
+ // Subsequent key-value pairs update the named field of the distribution struct.
334
+ for (; it != values.end(); ++it) {
335
+ // Integer scaling factor - if < 0, no integer rounding is performed.
336
+ if ((it->first.compare("scale") == 0) && !it->second.empty()) {
337
+ std::stringstream ss;
338
+ ss << it->second;
339
+ ss >> dist.int_scale;
340
+ continue; // next token
341
+ }
342
+
343
+ // Casts as integer without scaling
344
+ if (it->first.compare("integer") == 0) {
345
+ dist.int_scale = 0;
346
+ continue; // next token
347
+ }
348
+
349
+ // initialize other members
350
+ for (int m = 0; members[m].label; ++m) {
351
+ if (it->first == members[m].label && !it->second.empty()) {
352
+ std::stringstream ss;
353
+ ss << it->second;
354
+ ss >> *(members[m].member);
355
+ }
356
+ }
357
+ }
358
+ }
359
+
360
+ void Options::Initialization::print_usage(std::ostream &out) const {
361
+
362
+ out << "Initialization:\n"
363
+
364
+ << " --initialization=<bool> "
365
+ << " Enables initialization (default: true). If false, device memory is" << end_of_line
366
+ << " not initialized after allocation.\n\n"
367
+
368
+ << " --initialization-provider=<provider> "
369
+ << " Selects initialization provider {host, device*}. (default: '*')\n\n"
370
+
371
+ << " --dist=<distribution> "
372
+ << " Data distribution of input tensors {uniform*, gaussian, identity, sequential}" << end_of_line
373
+ << " --dist=uniform,min:<double>,max:<double>,scale:<integer>" << end_of_line
374
+ << " --dist=gaussian,mean:<double>,stddev:<double>,scale:<integer>,pnzA:<double>,pnzB:<double>,pnzC:<double>" << end_of_line
375
+ << " --dist=sequential,start:<double>,delta:<double>,scale:<integer>" << end_of_line
376
+ << " --dist=identity\n\n"
377
+
378
+ << " --seed=<int> "
379
+ << " Random number generator seed. Used to enforce deterministic" << end_of_line
380
+ << " initialization.\n\n";
381
+
382
+ }
383
+
384
+ void Options::Initialization::print_options(std::ostream &out, int indent) const {
385
+
386
+ }
387
+
388
+ /////////////////////////////////////////////////////////////////////////////////////////////////
389
+
390
+ Options::Library::Library(cutlass::CommandLine const &cmdline) {
391
+
392
+ algorithm_mode = AlgorithmMode::kDefault;
393
+
394
+ if (cmdline.check_cmd_line_flag("library-algo-mode")) {
395
+ std::string mode = "default";
396
+ cmdline.get_cmd_line_argument("library-algo-mode", mode);
397
+ algorithm_mode = from_string<AlgorithmMode>(mode);
398
+ }
399
+
400
+ if (cmdline.check_cmd_line_flag("library-algos")) {
401
+
402
+ // If algorithms are specified, override as kBest.
403
+ algorithm_mode = AlgorithmMode::kBest;
404
+
405
+ std::vector<std::string> tokens;
406
+ cmdline.get_cmd_line_arguments("library-algos", tokens);
407
+
408
+ algorithms.reserve(tokens.size());
409
+
410
+ for (auto const & token : tokens) {
411
+ if (token.find(":")) {
412
+ // TODO: tokenized range
413
+ }
414
+ else {
415
+ int algo;
416
+ std::stringstream ss;
417
+
418
+ ss << token;
419
+ ss >> algo;
420
+
421
+ algorithms.push_back(algo);
422
+ }
423
+ }
424
+ }
425
+ }
426
+
427
+ void Options::Library::print_usage(std::ostream &out) const {
428
+
429
+ out << "Library:\n"
430
+
431
+ << " --library-algo-mode=<mode> "
432
+ << " Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN.\n"
433
+ << " "
434
+ << " mode={default*,matching,best}\n\n"
435
+
436
+ << " --library-algos=<range-list> "
437
+ << " If --library-algo-mode=best, permits specifying a selection of algorithms.\n\n";
438
+
439
+ }
440
+
441
+ void Options::Library::print_options(std::ostream &out, int indent) const {
442
+
443
+ out
444
+ << indent_str(indent) << "library-algo-mode: " << to_string(algorithm_mode) << "\n"
445
+ << indent_str(indent) << "library-algos: ";
446
+
447
+ int j = 0;
448
+ for (int x : algorithms) {
449
+ out << (j++ ? "," : "") << x;
450
+ }
451
+
452
+ out << "\n\n";
453
+ }
454
+
455
+ /////////////////////////////////////////////////////////////////////////////////////////////////
456
+
457
+ Options::Profiling::Profiling(cutlass::CommandLine const &cmdline) {
458
+
459
+ cmdline.get_cmd_line_argument("workspace-count", workspace_count, 0);
460
+ cmdline.get_cmd_line_argument("warmup-iterations", warmup_iterations, 10);
461
+ cmdline.get_cmd_line_argument("profiling-iterations", iterations, 100);
462
+ cmdline.get_cmd_line_argument("sleep-duration", sleep_duration, 50);
463
+ cmdline.get_cmd_line_argument("profiling-enabled", enabled, true);
464
+
465
+ if (cmdline.check_cmd_line_flag("providers")) {
466
+
467
+ std::vector<std::string> tokens;
468
+ cmdline.get_cmd_line_arguments("providers", tokens);
469
+
470
+ providers.clear();
471
+
472
+ for (auto const &token : tokens) {
473
+ providers.push_back(library::from_string<library::Provider>(token));
474
+ }
475
+ }
476
+ else {
477
+ providers.push_back(library::Provider::kCUTLASS);
478
+ providers.push_back(library::Provider::kCUBLAS);
479
+ providers.push_back(library::Provider::kCUDNN);
480
+ }
481
+ }
482
+
483
+ void Options::Profiling::print_usage(std::ostream &out) const {
484
+
485
+ out << "Profiling:\n"
486
+
487
+ << " --workspace-count=<workspace count> "
488
+ << " Number of discrete workspaces maintained to avoid cache-resident operands. " << end_of_line
489
+ << " If zero (default), the amount is chosen for each workload based on " << end_of_line
490
+ << " capacity of the last-level cache.\n\n"
491
+
492
+ << " --profiling-iterations=<iterations> "
493
+ << " Number of iterations to profile each kernel. If zero, kernels" << end_of_line
494
+ << " are launched up to the profiling duration.\n\n"
495
+
496
+ << " --warmup-iterations=<iterations> "
497
+ << " Number of iterations to execute each kernel prior to profiling.\n\n"
498
+
499
+ << " --sleep-duration=<duration> "
500
+ << " Number of ms to sleep between profiling periods (ms).\n\n"
501
+
502
+ << " --profiling-enabled=<bool> "
503
+ << " If true, profiling is actually conducted.\n\n"
504
+
505
+ ;
506
+ }
507
+
508
+ void Options::Profiling::print_options(std::ostream &out, int indent) const {
509
+
510
+ out
511
+ << indent_str(indent) << "profiling_iterations: " << iterations << "\n"
512
+ << indent_str(indent) << "sleep_duration: " << sleep_duration << "\n"
513
+ << indent_str(indent) << "profiling_enabled: " << enabled << "\n"
514
+ << indent_str(indent) << "providers: [";
515
+
516
+ int j = 0;
517
+ for (auto const & provider : providers) {
518
+ out << (j++ ? ", " : "") << library::to_string(provider);
519
+ }
520
+ out << "]\n";
521
+ }
522
+
523
+ /// Returns true if a provider is enabled
524
+ bool Options::Profiling::provider_enabled(library::Provider provider) const {
525
+ return std::find(providers.begin(), providers.end(), provider) != providers.end();
526
+ }
527
+
528
+ /// Returns the index of a provider if its enabled
529
+ size_t Options::Profiling::index(library::Provider provider) const {
530
+ size_t idx = 0;
531
+ for (auto const & x : providers) {
532
+ if (x == provider) {
533
+ return idx;
534
+ }
535
+ ++idx;
536
+ }
537
+ return idx;
538
+ }
539
+
540
+ /////////////////////////////////////////////////////////////////////////////////////////////////
541
+
542
+ Options::Verification::Verification(cutlass::CommandLine const &cmdline) {
543
+
544
+ cmdline.get_cmd_line_argument("verification-enabled", enabled, true);
545
+ if (enabled) {
546
+ cmdline.get_cmd_line_argument("verification-required", required, false);
547
+ }
548
+
549
+ cmdline.get_cmd_line_argument("epsilon", epsilon, 0.05);
550
+
551
+ cmdline.get_cmd_line_argument("nonzero-floor", nonzero_floor, 1.0 / 256.0);
552
+
553
+ if (cmdline.check_cmd_line_flag("save-workspace")) {
554
+ std::string value;
555
+ cmdline.get_cmd_line_argument("save-workspace", value);
556
+ save_workspace = from_string<SaveWorkspace>(value);
557
+ }
558
+ else {
559
+ save_workspace = SaveWorkspace::kNever;
560
+ }
561
+
562
+ if (cmdline.check_cmd_line_flag("verification-providers")) {
563
+
564
+ std::vector<std::string> tokens;
565
+ cmdline.get_cmd_line_arguments("verification-providers", tokens);
566
+
567
+ providers.clear();
568
+
569
+ for (auto const &token : tokens) {
570
+ library::Provider provider = library::from_string<library::Provider>(token);
571
+ if (provider != library::Provider::kInvalid) {
572
+ providers.push_back(provider);
573
+ }
574
+ }
575
+ }
576
+ else {
577
+ providers.push_back(library::Provider::kCUBLAS);
578
+ providers.push_back(library::Provider::kReferenceDevice);
579
+ providers.push_back(library::Provider::kCUDNN);
580
+ }
581
+ }
582
+
583
+ void Options::Verification::print_usage(std::ostream &out) const {
584
+
585
+ out << "Verification:\n"
586
+
587
+ << " --verification-enabled=<bool> "
588
+ << " Whether to perform verification checks.\n\n"
589
+
590
+ << " --epsilon=<error> "
591
+ << " Error threshold (default: 0.05). Setting it to zero requires" << end_of_line
592
+ << " bit-level equivalence.\n\n"
593
+
594
+ << " --nonzero-floor=<floor> "
595
+ << " Results whose absolute value is less than this quantity" << end_of_line
596
+ << " are treated as zero for comparisons.\n\n"
597
+
598
+ << " --save-workspace=<string> "
599
+ << " Specifies when to save the GEMM inputs and results to the filesystem." << end_of_line
600
+ << " --save-workspace=never never save workspace (default)" << end_of_line
601
+ << " --save-workspace=incorrect save workspace for incorrect results" << end_of_line
602
+ << " --save-workspace=always always save workspace\n\n"
603
+
604
+ << " --verification-providers=<providers> "
605
+ << " List of providers used to verify result. (default: '*')" << end_of_line
606
+ << " Gemm verification-providers {cublas*}" << end_of_line
607
+ << " Conv2d verification-providers {cudnn*, device*, host}"
608
+ << "\n\n";
609
+ }
610
+
611
+ void Options::Verification::print_options(std::ostream &out, int indent) const {
612
+
613
+ out
614
+ << indent_str(indent) << "verification_enabled: " << enabled << "\n"
615
+ << indent_str(indent) << "epsilon: " << epsilon << "\n"
616
+ << indent_str(indent) << "save_workspace: " << to_string(save_workspace) << "\n"
617
+ << indent_str(indent) << "verification_providers: [";
618
+
619
+ int j = 0;
620
+ for (auto const & provider : providers) {
621
+ out << (j++ ? ", " : "") << library::to_string(provider);
622
+ }
623
+ out << "]\n";
624
+ }
625
+
626
+ /// Returns true if a provider is enabled
627
+ bool Options::Verification::provider_enabled(library::Provider provider) const {
628
+ return std::find(providers.begin(), providers.end(), provider) != providers.end();
629
+ }
630
+
631
+ /// Returns the index of a provider if its enabled
632
+ size_t Options::Verification::index(library::Provider provider) const {
633
+ size_t idx = 0;
634
+ for (auto const & x : providers) {
635
+ if (x == provider) {
636
+ return idx;
637
+ }
638
+ ++idx;
639
+ }
640
+ return idx;
641
+ }
642
+
643
+ /////////////////////////////////////////////////////////////////////////////////////////////////
644
+
645
+ Options::Report::Report(cutlass::CommandLine const &cmdline) {
646
+
647
+ cmdline.get_cmd_line_argument("append", append, false);
648
+ cmdline.get_cmd_line_argument("output", output_path);
649
+ cmdline.get_cmd_line_argument("junit-output", junit_output_path);
650
+
651
+ if (cmdline.check_cmd_line_flag("tags")) {
652
+ cmdline.get_cmd_line_argument_pairs("tags", pivot_tags);
653
+ }
654
+
655
+ cmdline.get_cmd_line_argument("report-not-run", report_not_run, false);
656
+
657
+ cmdline.get_cmd_line_argument("verbose", verbose, true);
658
+
659
+ cmdline.get_cmd_line_argument("sort-results", sort_results, false);
660
+
661
+ cmdline.get_cmd_line_argument("print-kernel-before-running", print_kernel_before_running, false);
662
+ }
663
+
664
+ void Options::Report::print_usage(std::ostream &out) const {
665
+
666
+ out << "Report:\n"
667
+
668
+ << " --append=<bool> "
669
+ << " If true, result is appended to possibly existing file. Otherwise, " << end_of_line
670
+ << " any existing file is overwritten.\n\n"
671
+
672
+ << " --output=<path> "
673
+ << " Path to output file for machine readable results. Operation kind and '.csv' is appended.\n\n"
674
+
675
+ << " --junit-output=<path> "
676
+ << " Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended.\n\n"
677
+
678
+ << " --print-kernel-before-running=<bool> "
679
+ << " Prints the name of the kernel being profiled before running the kernel." << end_of_line
680
+ << " This is useful for determining which kernel is causing a run of the profiler to hang\n\n"
681
+
682
+ << " --report-not-run=<bool> "
683
+ << " If true, reports the status of all kernels including those that" << end_of_line
684
+ << " do not satisfy the given arguments.\n\n"
685
+
686
+ << " --tags=<column:tag,...> "
687
+ << " Inserts leading columns in output table and uniform values for each" << end_of_line
688
+ << " column. Useful for generating pivot tables.\n\n"
689
+
690
+ << " --verbose=<bool> "
691
+ << " Prints human-readable text to stdout. If false, nothing is written to stdout.\n\n"
692
+
693
+ << " --sort-results=<bool> "
694
+ << " Sorts results (by flops-per-byte).\n\n";
695
+ }
696
+
697
+ void Options::Report::print_options(std::ostream &out, int indent) const {
698
+
699
+ out
700
+ << indent_str(indent) << "append: " << append << "\n"
701
+ << indent_str(indent) << "output: " << output_path << "\n"
702
+ << indent_str(indent) << "junit-output: " << junit_output_path << "\n"
703
+ << indent_str(indent) << "print-kernel-before-running: " << print_kernel_before_running << "\n"
704
+ << indent_str(indent) << "report-not-run: " << report_not_run << "\n"
705
+ << indent_str(indent) << "tags:\n";
706
+
707
+ for (auto const & tag : pivot_tags) {
708
+ out << indent_str(indent + 1) << tag.first << ": " << tag.second << "\n";
709
+ }
710
+
711
+ out
712
+ << indent_str(indent) << "verbose: " << verbose << "\n";
713
+ }
714
+
715
+ /////////////////////////////////////////////////////////////////////////////////////////////////
716
+
717
+ Options::About::About(cutlass::CommandLine const &cmdline) {
718
+ help = cmdline.check_cmd_line_flag("help");
719
+ version = cmdline.check_cmd_line_flag("version");
720
+ device_info = cmdline.check_cmd_line_flag("device-info");
721
+ }
722
+
723
+ void Options::About::print_usage(std::ostream &out) const {
724
+
725
+ out << "About:\n"
726
+ << " --version ";
727
+
728
+ print_version(out);
729
+
730
+ out << "\n";
731
+ }
732
+
733
+ void Options::About::print_version(std::ostream &out) {
734
+ out << "CUTLASS " << cutlass::getVersionString()
735
+ << " built on " << __DATE__ << " at " << __TIME__;
736
+ if (!cutlass::getGitRevision().empty()) out << " with commit " << cutlass::getGitRevision() << "";
737
+ }
738
+
739
+ void Options::About::print_options(std::ostream &out, int indent) const {
740
+
741
+ }
742
+
743
+ /////////////////////////////////////////////////////////////////////////////////////////////////
744
+
745
+ Options::Options(cutlass::CommandLine const &cmdline):
746
+ cmdline(cmdline),
747
+ device(cmdline),
748
+ initialization(cmdline),
749
+ library(cmdline),
750
+ profiling(cmdline),
751
+ verification(cmdline),
752
+ report(cmdline),
753
+ about(cmdline) {
754
+
755
+ if (cmdline.check_cmd_line_flag("mode")) {
756
+ std::string token;
757
+ cmdline.get_cmd_line_argument("mode", token);
758
+ execution_mode = from_string<ExecutionMode>(token);
759
+ }
760
+ else {
761
+ execution_mode = ExecutionMode::kProfile;
762
+ }
763
+
764
+ // Enumerating kernels is equivalent to a dry run.
765
+ if (execution_mode == ExecutionMode::kEnumerate) {
766
+ execution_mode = ExecutionMode::kDryRun;
767
+ }
768
+
769
+ if (cmdline.check_cmd_line_flag("operation")) {
770
+ std::string str;
771
+ cmdline.get_cmd_line_argument("operation", str);
772
+ operation_kind = library::from_string<library::OperationKind>(str);
773
+ }
774
+ else if (cmdline.check_cmd_line_flag("function")) {
775
+ std::string str;
776
+ cmdline.get_cmd_line_argument("function", str);
777
+ operation_kind = library::from_string<library::OperationKind>(str);
778
+ }
779
+ else {
780
+ operation_kind = library::OperationKind::kInvalid;
781
+ }
782
+
783
+ if (cmdline.check_cmd_line_flag("operation_names")) {
784
+ cmdline.get_cmd_line_arguments("operation_names", operation_names);
785
+ }
786
+ else if (cmdline.check_cmd_line_flag("kernels")) {
787
+ cmdline.get_cmd_line_arguments("kernels", operation_names);
788
+ profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match");
789
+ profiling.error_if_nothing_is_profiled = cmdline.check_cmd_line_flag("error-if-nothing-is-profiled");
790
+ }
791
+
792
+ if (cmdline.check_cmd_line_flag("ignore-kernels")) {
793
+ cmdline.get_cmd_line_arguments("ignore-kernels", excluded_operation_names);
794
+ profiling.error_on_no_match = cmdline.check_cmd_line_flag("error-on-no-match");
795
+ profiling.error_if_nothing_is_profiled = cmdline.check_cmd_line_flag("error-if-nothing-is-profiled");
796
+ }
797
+
798
+ // Prevent launches on the device for anything other than CUTLASS operation
799
+ // Allow verification only on host
800
+ if (execution_mode == ExecutionMode::kTrace) {
801
+ initialization.provider = library::Provider::kReferenceHost;
802
+ verification.providers = {library::Provider::kReferenceHost};
803
+ profiling.enabled = false;
804
+ }
805
+ }
806
+
807
+ void Options::print_usage(std::ostream &out) const {
808
+
809
+ out
810
+ << "CUTLASS Profiler\n"
811
+ << "usage:\n\n"
812
+ << " cutlass_profiler [options]\n\n"
813
+ << " --help\n\n"
814
+
815
+ << " --mode=<string> "
816
+ << " Cutlass profiler execution mode." << end_of_line
817
+ << " --mode=profile regular verification and profiling (default)" << end_of_line
818
+ << " --mode=dry_run no kernels are launched or workspaces allocated" << end_of_line
819
+ << " --mode=enumerate lists all operation kind and operations" << end_of_line
820
+ << " --mode=trace executes a single device-side computation with" << end_of_line
821
+ << " no other kernel launches\n\n"
822
+
823
+ << " --device-info "
824
+ << " Prints information on all GPUs present in the system\n\n"
825
+
826
+ << " --operation=<operation_kind> "
827
+ << " CUTLASS operation to profile.\n\n"
828
+
829
+ << " --kernels=<string_list> "
830
+ << " Filter operations by kernel names. For example, call all kernels with" << end_of_line
831
+ << " (\"s1688\" and \"nt\") or (\"s844\" and \"tn\" and \"align8\") in their" << end_of_line
832
+ << " operation name using --kernels=\"s1688*nt, s884*tn*align8\"\n\n"
833
+
834
+ << " --ignore-kernels=<string_list> "
835
+ << " Excludes kernels whose names match anything in this list.\n\n"
836
+ ;
837
+
838
+ //
839
+ // Detailed options
840
+ //
841
+
842
+ device.print_usage(out);
843
+ out << "\n";
844
+
845
+ initialization.print_usage(out);
846
+ out << "\n";
847
+
848
+ library.print_usage(out);
849
+ out << "\n";
850
+
851
+ profiling.print_usage(out);
852
+ out << "\n";
853
+
854
+ verification.print_usage(out);
855
+ out << "\n";
856
+
857
+ report.print_usage(out);
858
+ out << "\n";
859
+
860
+ about.print_usage(out);
861
+ out << "\n";
862
+ }
863
+
864
+ void Options::print_options(std::ostream &out) const {
865
+
866
+ out
867
+ << "options:\n"
868
+ << " help: " << about.help << "\n"
869
+ << " mode: " << to_string(execution_mode) << "\n";
870
+
871
+ out
872
+ << " device:\n";
873
+ device.print_options(out, 2);
874
+
875
+ out
876
+ << " initialization:\n";
877
+ initialization.print_options(out, 2);
878
+
879
+ out
880
+ << " profiling:\n";
881
+ profiling.print_options(out, 2);
882
+
883
+ out
884
+ << " verification:\n";
885
+ verification.print_options(out, 2);
886
+
887
+ out
888
+ << " report:\n";
889
+ report.print_options(out, 2);
890
+ }
891
+
892
+ std::string Options::indent_str(int indent) {
893
+ return std::string(indent * 2, ' ');
894
+ }
895
+
896
+ /////////////////////////////////////////////////////////////////////////////////////////////////
897
+
898
+ } // namespace profiler
899
+ } // namespace cutlass
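The flags parsed and documented above compose into invocations such as the following (illustrative examples only; the operation kind and kernel-name patterns shown are typical values rather than an exhaustive list):

    cutlass_profiler --mode=enumerate --operation=gemm
    cutlass_profiler --mode=profile --kernels="s1688*nt,s884*tn*align8" --error-on-no-match
    cutlass_profiler --mode=trace --kernels="s1688*nt" --ignore-kernels="*align1*"

In trace mode, the constructor above forces host-reference initialization and verification and disables profiling, so a single device-side computation is executed with no other kernel launches.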
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/performance_report.cpp ADDED
@@ -0,0 +1,505 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Performance report generation for the CUTLASS profiler
33
+ */
34
+
35
+ #include <iostream>
36
+ #include <stdexcept>
37
+ #include <iomanip>
38
+ #include <algorithm>
39
+ #include <cstring>
40
+
41
+ #include "cutlass/library/util.h"
42
+
43
+ #include "cutlass/library/util.h"
44
+
45
+ #include "cutlass/profiler/performance_report.h"
46
+ #include "cutlass/profiler/debug.h"
47
+ namespace cutlass {
48
+ namespace profiler {
49
+
50
+ /////////////////////////////////////////////////////////////////////////////////////////////////
51
+
52
+ #if defined(__unix__)
53
+
54
+ #define SHELL_COLOR_BRIGHT() "\033[1;37m"
55
+ #define SHELL_COLOR_GREEN() "\033[1;32m"
56
+ #define SHELL_COLOR_RED() "\033[1;31m"
57
+ #define SHELL_COLOR_END() "\033[0m"
58
+
59
+ #else
60
+
61
+ #define SHELL_COLOR_BRIGHT() ""
62
+ #define SHELL_COLOR_GREEN() ""
63
+ #define SHELL_COLOR_RED() ""
64
+ #define SHELL_COLOR_END() ""
65
+
66
+ #endif
67
+
68
+ /////////////////////////////////////////////////////////////////////////////////////////////////
69
+
70
+ PerformanceReport::PerformanceReport(
71
+ Options const &options,
72
+ std::vector<std::string> const &argument_names,
73
+ library::OperationKind const &op_kind
74
+ ):
75
+ options_(options), argument_names_(argument_names), problem_index_(0), good_(true), op_kind_(op_kind) {
76
+
77
+ // Strip '.csv' if present
78
+ std::string base_path = options_.report.output_path;
79
+ base_path = base_path.substr(0, base_path.rfind(".csv"));
80
+ op_file_name_ = base_path + "." + to_string(op_kind_) + ".csv";
81
+
82
+ base_path = options_.report.junit_output_path;
83
+ base_path = base_path.substr(0, base_path.rfind(".xml"));
84
+ base_path = base_path.substr(0, base_path.rfind(".junit"));
85
+ op_junit_file_name_ = base_path + "." + to_string(op_kind_) + ".junit.xml";
86
+
87
+ //
88
+ // Open output file for operation of PerformanceReport::op_kind
89
+ //
90
+ if (!options_.report.output_path.empty()) {
91
+
92
+ bool print_header = true;
93
+
94
+ if (options_.report.append) {
95
+
96
+ std::ifstream test_output_file(op_file_name_);
97
+
98
+ if (test_output_file.is_open()) {
99
+ print_header = false;
100
+ test_output_file.close();
101
+ }
102
+
103
+ output_file_.open(op_file_name_, std::ios::app);
104
+ }
105
+ else {
106
+ output_file_.open(op_file_name_);
107
+ }
108
+
109
+ if (!output_file_.good()) {
110
+
111
+ std::cerr << "Could not open output file at path '"
112
+ << options_.report.output_path << "'" << std::endl;
113
+
114
+ good_ = false;
115
+ }
116
+
117
+ if (print_header) {
118
+ print_csv_header_(output_file_) << std::endl;
119
+ }
120
+ }
121
+
122
+ if (!options_.report.junit_output_path.empty()) {
123
+
124
+ junit_output_file_.open(op_junit_file_name_);
125
+
126
+ if (!junit_output_file_.good()) {
127
+
128
+ std::cerr << "Could not open junit output file at path '"
129
+ << options_.report.junit_output_path << "'" << std::endl;
130
+
131
+ good_ = false;
132
+ }
133
+
134
+ print_junit_header_(junit_output_file_);
135
+ }
136
+ }
137
+
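Concretely, the constructor splices the operation kind into the configured report paths; for a run that profiles GEMM kernels with --output=report.csv and --junit-output=report.xml (flag names assumed from the report options, purely illustrative), the files written are:

    report.gemm.csv
    report.gemm.junit.xml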
138
+ void PerformanceReport::next_problem() {
139
+ ++problem_index_;
140
+ }
141
+
142
+ void PerformanceReport::append_result(PerformanceResult result) {
143
+
144
+ result.problem_index = problem_index_;
145
+
146
+ if (options_.report.verbose) {
147
+ std::cout << "\n";
148
+ print_result_pretty_(std::cout, result) << std::flush;
149
+ }
150
+
151
+ if (junit_output_file_.is_open()) {
152
+ print_junit_result_(junit_output_file_, result);
153
+ }
154
+
155
+ if (output_file_.is_open()) {
156
+ print_result_csv_(output_file_, result) << std::endl;
157
+ }
158
+ else {
159
+ concatenated_results_.push_back(result);
160
+ }
161
+ }
162
+
163
+ void PerformanceReport::sort_results(PerformanceResultVector &results) {
164
+
165
+ struct FlopsPerByteCompare
166
+ {
167
+ bool operator()(const PerformanceResult &a, const PerformanceResult &b)
168
+ {
169
+ double a_flops_per_byte = double(a.flops) / double(a.bytes);
170
+ double b_flops_per_byte = double(b.flops) / double(b.bytes);
171
+
172
+ return (a_flops_per_byte < b_flops_per_byte);
173
+ }
174
+ };
175
+
176
+ std::stable_sort(results.begin(), results.end(), FlopsPerByteCompare());
177
+ }
178
+
179
+ void PerformanceReport::append_results(PerformanceResultVector const &results) {
180
+
181
+ if (options_.report.verbose) {
182
+ std::cout << "\n\n";
183
+ }
184
+
185
+ // For each result
186
+ for (auto const & result : results) {
187
+ append_result(result);
188
+ }
189
+ }
190
+
191
+ PerformanceReport::~PerformanceReport() {
192
+
193
+ //
194
+ // Output results to stdout if they were not written to a file already.
195
+ //
196
+ if (options_.report.verbose && !concatenated_results_.empty()) {
197
+
198
+ if (options_.report.sort_results) {
199
+ sort_results(concatenated_results_);
200
+ }
201
+
202
+ std::cout << "\n\n";
203
+ std::cout << "=============================\n\n";
204
+ std::cout << "CSV Results:\n\n";
205
+
206
+ print_csv_header_(std::cout) << std::endl;
207
+
208
+ for (auto const &result : concatenated_results_) {
209
+ print_result_csv_(std::cout, result) << "\n";
210
+ }
211
+ }
212
+ else if (output_file_.is_open() && options_.report.verbose) {
213
+ std::cout << "\nWrote results to '" << op_file_name_ << "'" << std::endl;
214
+ }
215
+
216
+ if (output_file_.is_open()) {
217
+ output_file_.close();
218
+ }
219
+
220
+ if (junit_output_file_.is_open()) {
221
+ print_junit_footer_(junit_output_file_);
222
+ junit_output_file_.close();
223
+ std::cout << "\nWrote jUnit results to '" << op_junit_file_name_ << "'" << std::endl;
224
+ }
225
+ }
226
+
227
+ static const char *disposition_status_color(Disposition disposition) {
228
+ switch (disposition) {
229
+ case Disposition::kPassed: return SHELL_COLOR_GREEN();
230
+ case Disposition::kIncorrect: return SHELL_COLOR_RED();
231
+ case Disposition::kFailed: return SHELL_COLOR_RED();
232
+ default:
233
+ break;
234
+ }
235
+ return SHELL_COLOR_END();
236
+ }
237
+
238
+ /// Prints the result in human readable form
239
+ std::ostream & PerformanceReport::print_result_pretty_(
240
+ std::ostream &out,
241
+ PerformanceResult const &result,
242
+ bool use_shell_coloring) {
243
+
244
+ out << "=============================\n"
245
+ << " Problem ID: " << result.problem_index << "\n";
246
+
247
+ if (!options_.report.pivot_tags.empty()) {
248
+
249
+ out << " Tags: ";
250
+
251
+ int column_idx = 0;
252
+ for (auto const & tag : options_.report.pivot_tags) {
253
+ out << (column_idx++ ? "," : "") << tag.first << ":" << tag.second;
254
+ }
255
+
256
+ out << "\n";
257
+ }
258
+
259
+ std::string shell_color_bright = use_shell_coloring ? SHELL_COLOR_BRIGHT() : "";
260
+ std::string shell_color_end = use_shell_coloring ? SHELL_COLOR_END() : "";
261
+ auto _disposition_status_color = [&](Disposition d) -> const char * {
262
+ return use_shell_coloring ? disposition_status_color(d) : "";
263
+ };
264
+
265
+ out
266
+ << "\n"
267
+ << " Provider: " << shell_color_bright << library::to_string(result.provider, true) << shell_color_end << "\n"
268
+ << " OperationKind: " << shell_color_bright << library::to_string(result.op_kind) << shell_color_end << "\n"
269
+ << " Operation: " << result.operation_name << "\n\n"
270
+ << " Status: " << shell_color_bright << library::to_string(result.status, true) << shell_color_end << "\n"
271
+ << " Verification: " << shell_color_bright << (options_.verification.enabled ? "ON":"OFF") << shell_color_end << "\n"
272
+ << " Disposition: " << _disposition_status_color(result.disposition) << to_string(result.disposition, true) << shell_color_end << "\n\n";
273
+
274
+ // Display individual verification results for each verification-provider
275
+ if (options_.verification.enabled) {
276
+
277
+ static int const indent_spaces = 16;
278
+
279
+ for(auto & m : result.verification_map) {
280
+ out << std::right << std::setw(indent_spaces) << library::to_string(m.first, true) << ": " << to_string(m.second, true) << "\n";
281
+ }
282
+ }
283
+
284
+ out
285
+ << "\n Arguments:";
286
+
287
+ int column_idx = 0;
288
+ for (auto const &arg : result.arguments) {
289
+ if (!arg.second.empty()) {
290
+ out << " --" << arg.first << "=" << arg.second;
291
+ column_idx += int(4 + arg.first.size() + arg.second.size());
292
+ if (column_idx > 98) {
293
+ out << " \\\n ";
294
+ column_idx = 0;
295
+ }
296
+ }
297
+ }
298
+ out << "\n\n";
299
+
300
+ out
301
+ << " Bytes: " << result.bytes << " bytes\n"
302
+ << " FLOPs: " << result.flops << " flops\n"
303
+ << " FLOPs/Byte: " << (result.flops / result.bytes) << "\n\n";
304
+
305
+ if (result.good()) {
306
+
307
+ out
308
+ << " Runtime: " << result.runtime << " ms\n"
309
+ << " Memory: " << result.gbytes_per_sec() << " GiB/s\n"
310
+ << "\n Math: " << result.gflops_per_sec() << " GFLOP/s\n";
311
+
312
+ }
313
+
314
+ return out;
315
+ }
316
+
317
+ /// Prints the CSV header
318
+ std::ostream & PerformanceReport::print_csv_header_(
319
+ std::ostream &out) {
320
+
321
+ int column_idx = 0;
322
+
323
+ // Pivot tags
324
+ for (auto const & tag : options_.report.pivot_tags) {
325
+ out << (column_idx++ ? "," : "") << tag.first;
326
+ }
327
+
328
+ out
329
+ << (column_idx ? "," : "") << "Problem,Provider"
330
+ << ",OperationKind,Operation,Disposition,Status";
331
+
332
+ for (auto const &arg_name : argument_names_) {
333
+ out << "," << arg_name;
334
+ }
335
+
336
+ out
337
+ << ",Bytes"
338
+ << ",Flops"
339
+ << ",Flops/Byte"
340
+ << ",Runtime"
341
+ << ",GB/s"
342
+ << ",GFLOPs"
343
+ ;
344
+
345
+ return out;
346
+ }
347
+
348
+ /// Print the result in CSV output
349
+ std::ostream & PerformanceReport::print_result_csv_(
350
+ std::ostream &out,
351
+ PerformanceResult const &result) {
352
+
353
+ int column_idx = 0;
354
+
355
+ // Pivot tags
356
+ for (auto const & tag : options_.report.pivot_tags) {
357
+ out << (column_idx++ ? "," : "") << tag.second;
358
+ }
359
+
360
+ out
361
+ << (column_idx ? "," : "")
362
+ << result.problem_index
363
+ << "," << to_string(result.provider, true)
364
+ << "," << to_string(result.op_kind)
365
+ << "," << result.operation_name
366
+ << "," << to_string(result.disposition)
367
+ << "," << library::to_string(result.status);
368
+
369
+ for (auto const & arg : result.arguments) {
370
+ out << "," << arg.second;
371
+ }
372
+
373
+ out
374
+ << "," << result.bytes
375
+ << "," << result.flops
376
+ << "," << result.flops / result.bytes
377
+ << "," << result.runtime;
378
+
379
+ if (result.good()) {
380
+
381
+ out
382
+ << "," << result.gbytes_per_sec()
383
+ << "," << result.gflops_per_sec()
384
+ ;
385
+
386
+ }
387
+ else {
388
+ out << std::string(2
389
+ , ','
390
+ );
391
+ }
392
+
393
+ return out;
394
+ }
395
+
396
+ std::ostream & PerformanceReport::print_junit_header_(std::ostream &out) {
397
+
398
+ out << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" << std::endl;
399
+ out << "<testsuite name=\"cutlass_profiler\">" << std::endl;
400
+ return out;
401
+
402
+ }
403
+
404
+ namespace {
405
+
406
+ std::string escape_xml_special_chars(const std::string& src) {
407
+ std::stringstream dst;
408
+ for (char ch : src) {
409
+ switch (ch) {
410
+ case '&': dst << "&amp;"; break;
411
+ case '\'': dst << "&apos;"; break;
412
+ case '"': dst << "&quot;"; break;
413
+ case '<': dst << "&lt;"; break;
414
+ case '>': dst << "&gt;"; break;
415
+ default: dst << ch; break;
416
+ }
417
+ }
418
+ return dst.str();
419
+ }
420
+
421
+ template<typename T>
422
+ std::ostream & print_junit_result_property_(std::ostream & os, const std::string & name, const T & property) {
423
+ return os << " <property name=\"" << name << "\" value=\"" << property << "\" />" << std::endl;
424
+ }
425
+ }
426
+
427
+ std::ostream & PerformanceReport::print_junit_result_(std::ostream &out, PerformanceResult const &result) {
428
+
429
+ out << " " << "<testcase name=\"";
430
+
431
+ std::string delim = "";
432
+
433
+ // Pivot tags
434
+ for (auto const & tag : options_.report.pivot_tags) {
435
+ out << delim << tag.second; delim = "_";
436
+ }
437
+
438
+ out << delim << to_string(result.op_kind); delim = "_";
439
+ out << delim << result.operation_name;
440
+
441
+ for (auto const & arg : result.arguments) {
442
+ out << delim << arg.second;
443
+ }
444
+
445
+ out << "\" ";
446
+
447
+ bool skipped = false, failed = false, error = false;
448
+
449
+ switch (result.disposition) {
450
+ case Disposition::kNotRun:
451
+ case Disposition::kNotSupported:
452
+ skipped = true;
453
+ break;
454
+ case Disposition::kPassed:
455
+ case Disposition::kNotVerified:
456
+ break;
457
+ case Disposition::kFailed:
458
+ case Disposition::kIncorrect:
459
+ failed = true;
460
+ break;
461
+ case Disposition::kInvalidProblem:
462
+ case Disposition::kInvalid:
463
+ error = true;
464
+ break;
465
+ };
466
+
467
+ if (skipped) {
468
+ out << "status=\"notrun\"";
469
+ } else {
470
+ out << "status=\"run\"";
471
+ }
472
+
473
+ out << ">" << std::endl;
474
+
475
+ if (failed) {
476
+ out << " <failure message=\"" << to_string(result.disposition) << "\" />" << std::endl;
477
+ }
478
+
479
+ if (error) {
480
+ out << " <error message=\"" << to_string(result.disposition) << "\" />" << std::endl;
481
+ }
482
+
483
+ out << " <system-out><![CDATA[" << std::endl;
484
+ std::stringstream ss;
485
+ print_result_pretty_(ss, result, false);
486
+ out << escape_xml_special_chars(ss.str()) << std::endl;
487
+ out << " ]]></system-out>" << std::endl;
488
+
489
+ out << " </testcase>" << std::endl;
490
+
491
+ return out;
492
+
493
+ }
494
+
495
+ std::ostream & PerformanceReport::print_junit_footer_(std::ostream &out) {
496
+
497
+ out << "</testsuite>" << std::endl;
498
+ return out;
499
+
500
+ }
501
+
502
+ /////////////////////////////////////////////////////////////////////////////////////////////////
503
+
504
+ } // namespace profiler
505
+ } // namespace cutlass
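Taken together, print_csv_header_ and print_result_csv_ emit rows of the shape below, with any configured pivot-tag columns prepended and one column per problem-space argument (the angle-bracket placeholders are illustrative):

    Problem,Provider,OperationKind,Operation,Disposition,Status,<args...>,Bytes,Flops,Flops/Byte,Runtime,GB/s,GFLOPs

The jUnit writer wraps each result in a testcase element inside a single testsuite, roughly:

    <?xml version="1.0" encoding="UTF-8"?>
    <testsuite name="cutlass_profiler">
      <testcase name="<op_kind>_<operation>_<args...>" status="run">
        <system-out><![CDATA[ ...pretty-printed result... ]]></system-out>
      </testcase>
    </testsuite>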
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/performance_result.cu ADDED
@@ -0,0 +1,61 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Performance result support for the CUTLASS profiler
33
+ */
34
+
35
+ #pragma once
36
+
37
+ #include <vector>
38
+
39
+ #include "cutlass/cutlass.h"
40
+
41
+ // CUTLASS Profiler includes
42
+ #include "cutlass/profiler/enumerated_types.h"
43
+ #include "cutlass/profiler/performance_result.h"
44
+
45
+ // CUTLASS Library includes
46
+ #include "cutlass/library/library.h"
47
+ #include "cutlass/library/util.h"
48
+
49
+ namespace cutlass {
50
+ namespace profiler {
51
+
52
+ /////////////////////////////////////////////////////////////////////////////////////////////////
53
+
54
+
55
+ /////////////////////////////////////////////////////////////////////////////////////////////////
56
+
57
+ } // namespace profiler
58
+ } // namespace cutlass
59
+
60
+ /////////////////////////////////////////////////////////////////////////////////////////////////
61
+
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/problem_space.cpp ADDED
@@ -0,0 +1,1263 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Implementation of the CUTLASS profiler problem space
33
+ */
34
+
35
+ #include <string>
36
+ #include <stdexcept>
37
+ #include <sstream>
38
+
39
+ #include "cutlass/library/util.h"
40
+
41
+ #include "cutlass/profiler/problem_space.h"
42
+
43
+ /////////////////////////////////////////////////////////////////////////////////////////////////
44
+
45
+ namespace cutlass {
46
+ namespace profiler {
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ template <typename T>
51
+ static T lexical_cast(std::string const &str) {
52
+ std::stringstream ss;
53
+ T value;
54
+
55
+ ss << str;
56
+ ss >> value;
57
+
58
+ return value;
59
+ }
60
+
61
+ /////////////////////////////////////////////////////////////////////////////////////////////////
62
+
63
+ std::ostream & KernelArgument::ValueIterator::print(std::ostream &out) const {
64
+ out << "[" << (void *)this << " " << argument->qualified_name() << "] ";
65
+ if (this->null_argument) {
66
+ out << "<null>";
67
+ }
68
+ else {
69
+ out << "<not null>";
70
+ }
71
+ return out;
72
+ }
73
+
74
+ KernelArgument::~KernelArgument() {
75
+
76
+ }
77
+
78
+ //////////////////////////////////////////////////////////////////////////////////////////////////
79
+
80
+ ScalarArgument::ScalarValue::ScalarValue(
81
+ std::string const &value_,
82
+ ScalarArgument const *argument_,
83
+ bool not_null_
84
+ ):
85
+ KernelArgument::Value(argument_, not_null_),
86
+ value(value_) {
87
+
88
+ }
89
+
90
+ std::ostream &ScalarArgument::ScalarValue::print(std::ostream &out) const {
91
+ out << argument->qualified_name() << ": ";
92
+ if (not_null) {
93
+ out << value;
94
+ }
95
+ else {
96
+ out << "<null>";
97
+ }
98
+ return out;
99
+ }
100
+
101
+ ScalarArgument::ScalarValueIterator::ScalarValueIterator(
102
+ ScalarArgument const *argument_
103
+ ):
104
+ KernelArgument::ValueIterator(argument_) {
105
+
106
+ if (argument_) {
107
+ value_it = argument_->values.begin();
108
+ }
109
+ }
110
+
111
+ void ScalarArgument::ScalarValueIterator::operator++() {
112
+ if (this->null_argument) {
113
+ this->null_argument = false;
114
+ }
115
+ else {
116
+ ++value_it;
117
+ }
118
+ }
119
+
120
+ bool ScalarArgument::ScalarValueIterator::operator==(ValueIterator const &it) const {
121
+ if (it.type() != ArgumentTypeID::kScalar) {
122
+ throw std::runtime_error("Cannot compare ScalarValueIterator with iterator of different type");
123
+ }
124
+ auto const & scalar_it = static_cast<ScalarValueIterator const &>(it);
125
+ return value_it == scalar_it.value_it;
126
+ }
127
+
128
+ /// Gets the value pointed to
129
+ std::unique_ptr<KernelArgument::Value> ScalarArgument::ScalarValueIterator::at() const {
130
+ if (this->null_argument) {
131
+ return std::unique_ptr<KernelArgument::Value>(
132
+ new ScalarArgument::ScalarValue(
133
+ std::string(),
134
+ static_cast<ScalarArgument const *>(argument),
135
+ false));
136
+ }
137
+ else {
138
+ return std::unique_ptr<KernelArgument::Value>(
139
+ new ScalarArgument::ScalarValue(
140
+ *value_it,
141
+ static_cast<ScalarArgument const *>(argument)));
142
+ }
143
+ }
144
+
145
+ std::unique_ptr<KernelArgument::ValueIterator> ScalarArgument::begin() const {
146
+ return std::unique_ptr<KernelArgument::ValueIterator>(new ScalarValueIterator(this));
147
+ }
148
+
149
+ std::unique_ptr<KernelArgument::ValueIterator> ScalarArgument::end() const {
150
+ ScalarValueIterator *it = new ScalarValueIterator(this);
151
+ it->value_it = this->values.end();
152
+ it->null_argument = false;
153
+ return std::unique_ptr<ValueIterator>(it);
154
+ }
155
+
156
+ //////////////////////////////////////////////////////////////////////////////////////////////////
157
+
158
+ IntegerArgument::IntegerValue::IntegerValue(
159
+ int64_t value_,
160
+ IntegerArgument const *argument_,
161
+ bool not_null_
162
+ ): KernelArgument::Value(argument_, not_null_), value(value_) {
163
+
164
+ }
165
+
166
+
167
+ /// Pretty printer for debugging
168
+ std::ostream &IntegerArgument::IntegerValue::print(std::ostream &out) const {
169
+ out << argument->qualified_name() << ": ";
170
+ if (not_null) {
171
+ out << value;
172
+ }
173
+ else {
174
+ out << "<null>";
175
+ }
176
+ return out;
177
+ }
178
+
179
+ IntegerArgument::IntegerValueIterator::IntegerValueIterator(IntegerArgument const *argument_):
180
+ KernelArgument::ValueIterator(argument_) {
181
+
182
+ if (argument_) {
183
+ range_it = argument_->ranges.begin();
184
+ if (range_it != argument_->ranges.end()) {
185
+ value_it = range_it->begin();
186
+ }
187
+ }
188
+ }
189
+
190
+ void IntegerArgument::IntegerValueIterator::operator++() {
191
+
192
+ if (this->null_argument) {
193
+ this->null_argument = false;
194
+ }
195
+ else {
196
+ ++value_it;
197
+ if (value_it == range_it->end()) {
198
+ ++range_it;
199
+ if (range_it != static_cast<IntegerArgument const *>(argument)->ranges.end()) {
200
+ value_it = range_it->begin();
201
+ }
202
+ }
203
+ }
204
+ }
205
+
206
+ bool IntegerArgument::IntegerValueIterator::operator==(ValueIterator const &it) const {
207
+ if (it.type() != ArgumentTypeID::kInteger) {
208
+ throw std::runtime_error("Cannot compare IntegerValueIterator with iterator of different type");
209
+ }
210
+
211
+ auto const & integer_iterator = static_cast<IntegerValueIterator const &>(it);
212
+
213
+ if (this->null_argument) {
214
+ return it.null_argument;
215
+ }
216
+ else {
217
+ if (range_it != integer_iterator.range_it) {
218
+ return false;
219
+ }
220
+ if (range_it == static_cast<IntegerArgument const *>(argument)->ranges.end() &&
221
+ range_it == integer_iterator.range_it) {
222
+ return true;
223
+ }
224
+ return value_it == integer_iterator.value_it;
225
+ }
226
+ }
227
+
228
+ std::unique_ptr<KernelArgument::Value> IntegerArgument::IntegerValueIterator::at() const {
229
+ if (this->null_argument) {
230
+ return std::unique_ptr<KernelArgument::Value>(
231
+ new IntegerArgument::IntegerValue(
232
+ 0, static_cast<IntegerArgument const *>(argument), false));
233
+ }
234
+ else {
235
+ return std::unique_ptr<KernelArgument::Value>(
236
+ new IntegerArgument::IntegerValue(
237
+ *value_it, static_cast<IntegerArgument const *>(argument)));
238
+ }
239
+ }
240
+
241
+ std::unique_ptr<KernelArgument::ValueIterator> IntegerArgument::begin() const {
242
+ return std::unique_ptr<KernelArgument::ValueIterator>(new IntegerValueIterator(this));
243
+ }
244
+
245
+ std::unique_ptr<KernelArgument::ValueIterator> IntegerArgument::end() const {
246
+ IntegerValueIterator *it = new IntegerValueIterator(this);
247
+ it->range_it = this->ranges.end();
248
+ it->null_argument = false;
249
+ return std::unique_ptr<ValueIterator>(it);
250
+ }
251
+
252
+ //////////////////////////////////////////////////////////////////////////////////////////////////
253
+
254
+ TensorArgument::TensorValue::TensorValue(
255
+ TensorDescription const &desc_,
256
+ TensorArgument const *argument_,
257
+ bool not_null_
258
+ ):
259
+ KernelArgument::Value(argument_, not_null_),
260
+ desc(desc_) {
261
+
262
+ }
263
+
264
+ /// Pretty printer for debugging
265
+ std::ostream &TensorArgument::TensorValue::print(std::ostream &out) const {
266
+ out << argument->qualified_name() << ": " << to_string(desc.element) << ": " << to_string(desc.layout);
267
+ return out;
268
+ }
269
+
270
+ TensorArgument::TensorValueIterator::TensorValueIterator(
271
+ TensorArgument const *argument_
272
+ ):
273
+ KernelArgument::ValueIterator(argument_) {
274
+
275
+ if (argument_) {
276
+ value_it = argument_->values.begin();
277
+ }
278
+ }
279
+
280
+ void TensorArgument::TensorValueIterator::operator++() {
281
+ if (this->null_argument) {
282
+ this->null_argument = false;
283
+ }
284
+ else {
285
+ ++value_it;
286
+ }
287
+ }
288
+
289
+ bool TensorArgument::TensorValueIterator::operator==(ValueIterator const &it) const {
290
+ if (it.type() != ArgumentTypeID::kTensor) {
291
+ throw std::runtime_error("Cannot compare TensorValueIterator with iterator of different type");
292
+ }
293
+ auto const & tensor_it = static_cast<TensorValueIterator const &>(it);
294
+ return value_it == tensor_it.value_it;
295
+ }
296
+
297
+ /// Gets the value pointed to
298
+ std::unique_ptr<KernelArgument::Value> TensorArgument::TensorValueIterator::at() const {
299
+
300
+ if (this->null_argument) {
301
+ return std::unique_ptr<KernelArgument::Value>(
302
+ new TensorArgument::TensorValue(
303
+ TensorDescription(), static_cast<TensorArgument const *>(argument), false));
304
+ }
305
+ else {
306
+ return std::unique_ptr<KernelArgument::Value>(
307
+ new TensorArgument::TensorValue(
308
+ *value_it, static_cast<TensorArgument const *>(argument)));
309
+ }
310
+ }
311
+
312
+ std::unique_ptr<KernelArgument::ValueIterator> TensorArgument::begin() const {
313
+ return std::unique_ptr<KernelArgument::ValueIterator>(new TensorValueIterator(this));
314
+ }
315
+
316
+ std::unique_ptr<KernelArgument::ValueIterator> TensorArgument::end() const {
317
+ TensorValueIterator *it = new TensorValueIterator(this);
318
+ it->value_it = this->values.end();
319
+ it->null_argument = false;
320
+ return std::unique_ptr<ValueIterator>(it);
321
+ }
322
+
323
+ //////////////////////////////////////////////////////////////////////////////////////////////////
324
+
325
+ EnumeratedTypeArgument::EnumeratedTypeValue::EnumeratedTypeValue(
326
+ std::string const & element_,
327
+ EnumeratedTypeArgument const *argument_,
328
+ bool not_null_
329
+ ):
330
+ KernelArgument::Value(argument_, not_null_),
331
+ element(element_) {
332
+
333
+ }
334
+
335
+ /// Pretty printer for debugging
336
+ std::ostream &EnumeratedTypeArgument::EnumeratedTypeValue::print(std::ostream &out) const {
337
+ out << argument->qualified_name() << ": " << element;
338
+ return out;
339
+ }
340
+
341
+ EnumeratedTypeArgument::EnumeratedTypeValueIterator::EnumeratedTypeValueIterator(
342
+ EnumeratedTypeArgument const *argument_
343
+ ):
344
+ KernelArgument::ValueIterator(argument_) {
345
+
346
+ if (argument_) {
347
+ value_it = argument_->values.begin();
348
+ }
349
+ }
350
+
351
+ void EnumeratedTypeArgument::EnumeratedTypeValueIterator::operator++() {
352
+ if (this->null_argument) {
353
+ this->null_argument = false;
354
+ }
355
+ else {
356
+ ++value_it;
357
+ }
358
+ }
359
+
360
+ bool EnumeratedTypeArgument::EnumeratedTypeValueIterator::operator==(ValueIterator const &it) const {
361
+
362
+ if (it.type() != ArgumentTypeID::kEnumerated) {
363
+ throw std::runtime_error("Cannot compare EnumeratedTypeValueIterator with iterator of different type");
364
+ }
365
+
366
+ auto const & enumerated_type_it = static_cast<EnumeratedTypeValueIterator const &>(it);
367
+ return value_it == enumerated_type_it.value_it;
368
+ }
369
+
370
+ /// Gets the value pointed to
371
+ std::unique_ptr<KernelArgument::Value> EnumeratedTypeArgument::EnumeratedTypeValueIterator::at() const {
372
+
373
+ if (this->null_argument) {
374
+ return std::unique_ptr<KernelArgument::Value>(
375
+ new EnumeratedTypeValue(
376
+ std::string(), static_cast<EnumeratedTypeArgument const *>(argument), false));
377
+ }
378
+ else {
379
+ return std::unique_ptr<KernelArgument::Value>(
380
+ new EnumeratedTypeValue(
381
+ *value_it, static_cast<EnumeratedTypeArgument const *>(argument)));
382
+ }
383
+ }
384
+
385
+ std::unique_ptr<KernelArgument::ValueIterator> EnumeratedTypeArgument::begin() const {
386
+ return std::unique_ptr<KernelArgument::ValueIterator>(new EnumeratedTypeValueIterator(this));
387
+ }
388
+
389
+ std::unique_ptr<KernelArgument::ValueIterator> EnumeratedTypeArgument::end() const {
390
+ EnumeratedTypeValueIterator *it = new EnumeratedTypeValueIterator(this);
391
+ it->value_it = this->values.end();
392
+ it->null_argument = false;
393
+ return std::unique_ptr<ValueIterator>(it);
394
+ }
395
+
396
+ //////////////////////////////////////////////////////////////////////////////////////////////////
397
+
398
+ ProblemSpace::Iterator::Iterator(ProblemSpace const &problem_space) {
399
+ for (auto const & arg_ptr : problem_space.arguments) {
400
+ construct_(arg_ptr.get());
401
+ }
402
+ }
403
+
404
+ ProblemSpace::Iterator::Iterator(Iterator && it) {
405
+ iterators = std::move(it.iterators);
406
+ }
407
+
408
+ /// Helper for recursively constructing iterators
409
+ void ProblemSpace::Iterator::construct_(KernelArgument const *argument) {
410
+ iterators.emplace_back(argument->begin());
411
+ }
412
+
413
+ /// Given a set of ranges, iterate over the points within their Cartesian product. No big deal.
414
+ void ProblemSpace::Iterator::operator++() {
415
+
416
+ // Define a pair of iterator into the vector of iterators.
417
+ IteratorVector::iterator iterator_it = iterators.begin();
418
+ IteratorVector::iterator next_iterator = iterator_it;
419
+
420
+ // Advance the first argument.
421
+ ++(**iterator_it);
422
+
423
+ // Maintain a pair of iterators over consecutive arguments.
424
+ ++next_iterator;
425
+
426
+ // Carry logic
427
+ while (next_iterator != iterators.end() &&
428
+ **iterator_it == *((*iterator_it)->argument->end())) { // Did an iterator reach the end of its range?
429
+
430
+ (*iterator_it) = (*iterator_it)->argument->begin(); // Reset that iterator,
431
+
432
+ ++(**next_iterator); // and increment the next argument's iterator.
433
+
434
+ iterator_it = next_iterator; // Advance to the next argument
435
+ ++next_iterator;
436
+ }
437
+ }
438
+
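The carry loop above is an odometer walk over the Cartesian product of the per-argument value iterators. A minimal standalone sketch of the same pattern over plain string lists (hypothetical data, independent of the profiler's KernelArgument types) is:

    #include <iostream>
    #include <string>
    #include <vector>

    // Enumerate the Cartesian product of several value lists with odometer-style
    // carry: advance the first index, and whenever a digit wraps, reset it and
    // advance the next one, mirroring ProblemSpace::Iterator::operator++.
    int main() {
      std::vector<std::vector<std::string>> lists = {
        {"64", "128"},   // e.g. values of one argument
        {"nn", "nt"},    // e.g. values of another argument
        {"f16", "f32"}
      };

      std::vector<size_t> idx(lists.size(), 0);
      bool done = false;

      while (!done) {
        for (size_t i = 0; i < lists.size(); ++i) {
          std::cout << (i ? " " : "") << lists[i][idx[i]];
        }
        std::cout << "\n";

        // Carry: increment the least-significant digit; on wrap, reset and carry.
        size_t i = 0;
        for (; i < idx.size(); ++i) {
          if (++idx[i] < lists[i].size()) {
            break;       // no wrap; stop carrying
          }
          idx[i] = 0;    // wrapped; carry into the next digit
        }
        done = (i == idx.size());
      }
      return 0;
    }

The production iterator performs the same walk, but each "digit" is a polymorphic KernelArgument::ValueIterator, and only the last position is ever swapped to its end() sentinel (see move_to_end below).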
439
+ /// Moves iterator to end
440
+ void ProblemSpace::Iterator::move_to_end() {
441
+ if (!iterators.empty()) {
442
+ std::unique_ptr<KernelArgument::ValueIterator> new_iter = iterators.back()->argument->end();
443
+ std::swap(iterators.back(), new_iter);
444
+ }
445
+ }
446
+
447
+ ProblemSpace::Problem ProblemSpace::Iterator::at() const {
448
+ Problem problem;
449
+
450
+ for (std::unique_ptr<KernelArgument::ValueIterator> const & it : iterators) {
451
+ problem.emplace_back(it->at());
452
+ }
453
+
454
+ return problem;
455
+ }
456
+
457
+ /// Equality operator
458
+ bool ProblemSpace::Iterator::operator==(Iterator const &it) const {
459
+
460
+ // This would be an opportunity for auto, but explicitly denoting references to
461
+ // owning smart pointers to dynamic polymorphic objects seems like a kindness to the reader.
462
+ IteratorVector::const_iterator first_it = iterators.begin();
463
+ IteratorVector::const_iterator second_it = it.iterators.begin();
464
+
465
+ int idx = 0;
466
+ for (; first_it != iterators.end(); ++first_it, ++second_it, ++idx) {
467
+
468
+ KernelArgument::ValueIterator const *my_it = first_it->get();
469
+ KernelArgument::ValueIterator const *their_it = second_it->get();
470
+
471
+ if (*my_it != *their_it) {
472
+ return false;
473
+ }
474
+ }
475
+
476
+ return true;
477
+ }
478
+
479
+ std::ostream &ProblemSpace::Iterator::print(std::ostream &out) const {
480
+
481
+ for (std::unique_ptr<KernelArgument::ValueIterator> const & iter_ptr : iterators) {
482
+ out << " [iter " << (iter_ptr->null_argument ? "null" : "<not null>")
483
+ << ", type: " << to_string(iter_ptr->argument->description->type) << "]" << std::endl;
484
+ }
485
+
486
+ return out;
487
+ }
488
+
489
+ /////////////////////////////////////////////////////////////////////////////////////////////////
490
+
491
+ ProblemSpace::ProblemSpace(ArgumentDescriptionVector const &schema, CommandLine const &cmdline) {
492
+
493
+ // Clone the arguments
494
+ for (ArgumentDescription const & arg_desc : schema) {
495
+ clone_(arguments, &arg_desc);
496
+ }
497
+
498
+ // Parse values from the command line
499
+ for (auto & arg : arguments) {
500
+ parse_(arg.get(), cmdline);
501
+ }
502
+ }
503
+
504
+
505
+ /// Returns the index of an argument by name
506
+ size_t ProblemSpace::argument_index(char const *name) const {
507
+ return argument_index_map.at(name);
508
+ }
509
+
510
+ /// Helper for recursively cloning
511
+ void ProblemSpace::clone_(
512
+ KernelArgumentVector &kernel_args,
513
+ ArgumentDescription const *arg_desc) {
514
+
515
+ KernelArgument *kernel_arg = nullptr;
516
+
517
+ switch (arg_desc->type) {
518
+ case ArgumentTypeID::kScalar:
519
+ kernel_arg = new ScalarArgument(arg_desc);
520
+ break;
521
+ case ArgumentTypeID::kInteger:
522
+ kernel_arg = new IntegerArgument(arg_desc);
523
+ break;
524
+ case ArgumentTypeID::kTensor:
525
+ kernel_arg = new TensorArgument(arg_desc);
526
+ break;
527
+ case ArgumentTypeID::kStructure:
528
+ {
529
+ throw std::runtime_error("ArgumentTypeID::kStructure not supported");
530
+ }
531
+ break;
532
+ case ArgumentTypeID::kEnumerated:
533
+ kernel_arg = new EnumeratedTypeArgument(arg_desc);
534
+ break;
535
+
536
+ default: break;
537
+ }
538
+
539
+ if (kernel_arg) {
540
+ size_t idx = kernel_args.size();
541
+ for (auto const &alias : arg_desc->aliases) {
542
+ argument_index_map.insert(std::make_pair(alias, idx));
543
+ }
544
+ kernel_args.emplace_back(kernel_arg);
545
+ }
546
+ }
547
+
548
+ /// Parses a command line
549
+ void ProblemSpace::parse_(KernelArgument *arg, CommandLine const &cmdline) {
550
+
551
+ switch (arg->description->type) {
552
+ case ArgumentTypeID::kScalar:
553
+ {
554
+ auto * scalar = static_cast<ScalarArgument *>(arg);
555
+
556
+ for (auto const &alias : arg->description->aliases) {
557
+ if (cmdline.check_cmd_line_flag(alias.c_str())) {
558
+
559
+ std::vector<std::vector<std::string>> tokens;
560
+ cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens);
561
+
562
+ for (auto const & vec : tokens) {
563
+ if (!vec.empty()) {
564
+ scalar->values.push_back(vec.front());
565
+ }
566
+ }
567
+ break;
568
+ }
569
+ }
570
+ }
571
+ break;
572
+ case ArgumentTypeID::kInteger:
573
+ {
574
+ auto *integer = static_cast<IntegerArgument *>(arg);
575
+
576
+ for (auto const &alias : arg->description->aliases) {
577
+ if (cmdline.check_cmd_line_flag(alias.c_str())) {
578
+
579
+ std::vector<std::vector<std::string> > tokens;
580
+ cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens);
581
+
582
+ for (auto &range_tokens : tokens) {
583
+
584
+ if (!range_tokens.empty()) {
585
+
586
+ Range range;
587
+
588
+ if (range_tokens.front() == "rand") {
589
+ range.mode = Range::Mode::kRandom;
590
+ }
591
+ else if (range_tokens.front() == "randlg2") {
592
+ range.mode = Range::Mode::kRandomLog2;
593
+ }
594
+
595
+ switch (range.mode) {
596
+ case Range::Mode::kSequence:
597
+ {
598
+ range.first = lexical_cast<int64_t>(range_tokens.front());
599
+
600
+ if (range_tokens.size() > 1) {
601
+ range.last = lexical_cast<int64_t>(range_tokens.at(1));
602
+ }
603
+ else {
604
+ range.last = range.first;
605
+ }
606
+
607
+ if (range_tokens.size() > 2) {
608
+ range.increment = lexical_cast<int64_t>(range_tokens.at(2));
609
+ }
610
+ else {
611
+ range.increment = 1;
612
+ }
613
+ }
614
+ break;
615
+ case Range::Mode::kRandom: // fall-through
616
+ case Range::Mode::kRandomLog2:
617
+ {
618
+ if (range_tokens.size() < 4) {
619
+ throw std::runtime_error(
620
+ "Range of mode 'rand' must have four tokens showing "
621
+ "the minimum, maximum, and number of iterations. For example, "
622
+ "rand:16:128:1000");
623
+ }
624
+
625
+ range.minimum = lexical_cast<int64_t>(range_tokens.at(1));
626
+ range.maximum = lexical_cast<int64_t>(range_tokens.at(2));
627
+ range.first = 1;
628
+ range.last = lexical_cast<int64_t>(range_tokens.at(3));
629
+ range.increment = 1;
630
+
631
+ if (range_tokens.size() > 4) {
632
+ range.divisible = lexical_cast<int64_t>(range_tokens.at(4));
633
+ }
634
+ }
635
+ break;
636
+ default:
637
+ throw std::runtime_error("Unsupported range mode.");
638
+ break;
639
+ }
640
+
641
+ integer->ranges.push_back(range);
642
+ }
643
+ }
644
+ break;
645
+ }
646
+ }
647
+ }
648
+ break;
649
+ case ArgumentTypeID::kTensor:
650
+ {
651
+ auto *tensor = static_cast<TensorArgument *>(arg);
652
+
653
+ for (auto const &alias : arg->description->aliases) {
654
+ if (cmdline.check_cmd_line_flag(alias.c_str())) {
655
+
656
+ std::vector<std::vector<std::string>> tokens;
657
+
658
+ cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens);
659
+
660
+ for (auto const & tensor_tokens : tokens) {
661
+ if (!tensor_tokens.empty()) {
662
+ TensorArgument::TensorDescription tensor_desc;
663
+
664
+ tensor_desc.element = cutlass::library::from_string<library::NumericTypeID>(tensor_tokens.front());
665
+
666
+ // Layout
667
+ if (tensor_tokens.size() > 1) {
668
+ tensor_desc.layout = cutlass::library::from_string<library::LayoutTypeID>(tensor_tokens.at(1));
669
+ }
670
+
671
+ // Stride
672
+ for (size_t i = 2; i < tensor_tokens.size(); ++i) {
673
+ tensor_desc.stride.push_back(lexical_cast<int>(tensor_tokens.at(i)));
674
+ }
675
+
676
+ tensor->values.push_back(tensor_desc);
677
+ }
678
+ }
679
+ break;
680
+ }
681
+ }
682
+ }
683
+ break;
684
+ case ArgumentTypeID::kStructure:
685
+ {
686
+ throw std::runtime_error("Structure arguments not supported");
687
+ }
688
+ break;
689
+ case ArgumentTypeID::kEnumerated:
690
+ {
691
+ auto *enumerated_type = static_cast<EnumeratedTypeArgument *>(arg);
692
+
693
+ for (auto const &alias : arg->description->aliases) {
694
+ if (cmdline.check_cmd_line_flag(alias.c_str())) {
695
+
696
+ std::vector<std::string> tokens;
697
+ cmdline.get_cmd_line_arguments(alias.c_str(), tokens);
698
+
699
+ for (auto const & token : tokens) {
700
+ enumerated_type->values.push_back(token);
701
+ }
702
+
703
+ break;
704
+ }
705
+ }
706
+ }
707
+ break;
708
+ default:
709
+ break;
710
+ }
711
+ }
712
+
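On the command line, each argument kind above therefore accepts comma-separated lists whose elements are colon-delimited ranges (the tokenization itself lives in cutlass::CommandLine, which is not part of this file). Illustrative values, with the argument names assumed as typical rather than taken from this file:

    --m=1024                      single integer value (first == last, increment 1)
    --m=256:4096:256              integer sequence first:last:increment
    --m=rand:16:128:1000:8        random range min:max:count with optional divisibility
    --A=f16:column                tensor element type, then layout, then optional strides
    --op_class=tensorop,simt      enumerated values as a plain list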
713
+ /////////////////////////////////////////////////////////////////////////////////////////////////
714
+
715
+ ProblemSpace::Iterator ProblemSpace::begin() const {
716
+ return ProblemSpace::Iterator(*this);
717
+ }
718
+
719
+ ProblemSpace::Iterator ProblemSpace::end() const {
720
+ ProblemSpace::Iterator it(*this);
721
+ it.move_to_end();
722
+ return it;
723
+ }
724
+
725
+ /// Gets all argument names as an ordered vector
726
+ std::vector<std::string> ProblemSpace::argument_names() const {
727
+
728
+ Problem problem = this->begin().at();
729
+
730
+ std::vector<std::string> names;
731
+ names.reserve(problem.size());
732
+
733
+ for (auto const & arg : problem) {
734
+ names.push_back(arg->argument->description->aliases.front());
735
+ }
736
+
737
+ return names;
738
+ }
739
+
740
+ /////////////////////////////////////////////////////////////////////////////////////////////////
741
+
742
+ /// Lexically casts an argument to an int64 if it is defined. Returns true if not null.
743
+ bool arg_as_int(int64_t &int_value, KernelArgument::Value const *value_ptr) {
744
+ if (value_ptr->not_null) {
745
+ if (value_ptr->argument->description->type == ArgumentTypeID::kInteger) {
746
+ int_value = static_cast<IntegerArgument::IntegerValue const *>(value_ptr)->value;
747
+ }
748
+ else if (value_ptr->argument->description->type == ArgumentTypeID::kScalar) {
749
+ std::stringstream ss;
750
+ ss << static_cast<ScalarArgument::ScalarValue const *>(value_ptr)->value;
751
+ ss >> int_value;
752
+ }
753
+ else {
754
+ throw std::runtime_error(
755
+ "arg_as_int64_t() - illegal cast. Problem space argument must be integer or scalar");
756
+ }
757
+
758
+ return true;
759
+ }
760
+
761
+ return false;
762
+ }
763
+
764
+ /// Lexically casts an argument to an int if it is defined. Returns true if not null.
765
+ bool arg_as_int(int &int_value, KernelArgument::Value const *value_ptr) {
766
+ int64_t value64;
767
+ bool obtained = arg_as_int(value64, value_ptr);
768
+ if (obtained) {
769
+ int_value = int(value64);
770
+ return true;
771
+ }
772
+ return false;
773
+ }
774
+
775
+ /// Lexically casts an argument to an int
776
+ bool arg_as_int(
777
+ int &int_value,
778
+ char const *name,
779
+ ProblemSpace const &problem_space,
780
+ ProblemSpace::Problem const &problem) {
781
+
782
+ size_t idx = problem_space.argument_index(name);
783
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
784
+
785
+ return arg_as_int(int_value, value_ptr);
786
+ }
787
+
788
+ /// Lexically casts an argument to an int64
789
+ bool arg_as_int(
790
+ int64_t &int_value,
791
+ char const *name,
792
+ ProblemSpace const &problem_space,
793
+ ProblemSpace::Problem const &problem) {
794
+
795
+ size_t idx = problem_space.argument_index(name);
796
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
797
+
798
+ return arg_as_int(int_value, value_ptr);
799
+ }
800
+
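A typical call site inside an operation profiler reads a dimension through these helpers and falls back to a default when the argument was left null on the command line (a usage fragment with hypothetical names, shown only to illustrate the return-value contract):

    int m = 0;
    if (!arg_as_int(m, "m", problem_space, problem)) {
      m = 1024;  // argument not specified; use a default
    }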
801
+ /////////////////////////////////////////////////////////////////////////////////////////////////
802
+
803
+ /// Lexically casts an argument to a NumericTypeID if it is defined. Returns true if not null.
804
+ bool arg_as_NumericTypeID(
805
+ library::NumericTypeID &numeric_type,
806
+ KernelArgument::Value const *value_ptr) {
807
+
808
+ if (value_ptr->not_null) {
809
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
810
+
811
+ numeric_type = library::from_string<library::NumericTypeID>(
812
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element);
813
+
814
+ if (numeric_type == library::NumericTypeID::kInvalid) {
815
+ throw std::runtime_error(
816
+ "arg_as_NumericTypeID() - illegal cast.");
817
+ }
818
+ }
819
+ else {
820
+
821
+ throw std::runtime_error(
822
+ "arg_as_NumericTypeID() - illegal cast.");
823
+ }
824
+ return true;
825
+ }
826
+ return false;
827
+ }
828
+
829
+ /// Lexically casts an argument to a NumericTypeID if it is defined. Returns true if not null.
830
+ bool arg_as_NumericTypeID(
831
+ library::NumericTypeID &numeric_type,
832
+ char const *name,
833
+ ProblemSpace const &problem_space,
834
+ ProblemSpace::Problem const &problem) {
835
+
836
+ size_t idx = problem_space.argument_index(name);
837
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
838
+
839
+ return arg_as_NumericTypeID(numeric_type, value_ptr);
840
+ }
841
+
842
+ /////////////////////////////////////////////////////////////////////////////////////////////////
843
+
844
+ /// Lexically casts an argument to a RasterOrder if it is defined. Returns true if not null.
845
+ bool arg_as_RasterOrder(
846
+ library::RasterOrder &raster_order,
847
+ KernelArgument::Value const *value_ptr) {
848
+
849
+ if (value_ptr->not_null) {
850
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
851
+
852
+ raster_order = library::from_string<library::RasterOrder>(
853
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element);
854
+
855
+ if (raster_order == library::RasterOrder::kInvalid) {
856
+ throw std::runtime_error(
857
+ "arg_as_RasterOrder() - illegal cast.");
858
+ }
859
+ }
860
+ else {
861
+ throw std::runtime_error(
862
+ "arg_as_RasterOrder() - illegal cast.");
863
+ }
864
+ return true;
865
+ }
866
+ return false;
867
+ }
868
+
869
+ /// Lexically casts an argument to a RasterOrder if it is defined. Returns true if not null.
870
+ bool arg_as_RasterOrder(
871
+ library::RasterOrder &raster_order,
872
+ char const *name,
873
+ ProblemSpace const &problem_space,
874
+ ProblemSpace::Problem const &problem) {
875
+
876
+ size_t idx = problem_space.argument_index(name);
877
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
878
+
879
+ return arg_as_RasterOrder(raster_order, value_ptr);
880
+ }
881
+
882
+ /////////////////////////////////////////////////////////////////////////////////////////////////
883
+
884
+ /// Lexically casts an argument to a LayoutTypeID if it is defined. Returns true if not null.
885
+ bool arg_as_LayoutTypeID(
886
+ library::LayoutTypeID &layout_type,
887
+ KernelArgument::Value const *value_ptr) {
888
+
889
+ if (value_ptr->not_null) {
890
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
891
+
892
+ layout_type = library::from_string<library::LayoutTypeID>(
893
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element);
894
+
895
+ if (layout_type == library::LayoutTypeID::kInvalid) {
896
+ throw std::runtime_error(
897
+ "arg_as_LayoutTypeID() - illegal cast.");
898
+ }
899
+ }
900
+ else {
901
+
902
+ throw std::runtime_error(
903
+ "arg_as_LayoutTypeID() - illegal cast.");
904
+ }
905
+ return true;
906
+ }
907
+ return false;
908
+ }
909
+
910
+ /// Lexically casts an argument to a LayoutTypeID if it is defined. Returns true if not null.
911
+ bool arg_as_LayoutTypeID(
912
+ library::LayoutTypeID &layout_type,
913
+ char const *name,
914
+ ProblemSpace const &problem_space,
915
+ ProblemSpace::Problem const &problem) {
916
+
917
+ size_t idx = problem_space.argument_index(name);
918
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
919
+
920
+ return arg_as_LayoutTypeID(layout_type, value_ptr);
921
+ }
922
+
923
+ /////////////////////////////////////////////////////////////////////////////////////////////////
924
+
925
+ /// Lexically casts an argument to an OpcodeClassID if it is defined. Returns true if not null.
926
+ bool arg_as_OpcodeClassID(
927
+ library::OpcodeClassID &opcode_class,
928
+ KernelArgument::Value const *value_ptr) {
929
+
930
+ if (value_ptr->not_null) {
931
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
932
+
933
+ opcode_class = library::from_string<library::OpcodeClassID>(
934
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element);
935
+
936
+ if (opcode_class == library::OpcodeClassID::kInvalid) {
937
+ throw std::runtime_error(
938
+ "arg_as_OpcodeClassID() - illegal cast.");
939
+ }
940
+ }
941
+ else {
942
+
943
+ throw std::runtime_error(
944
+ "arg_as_OpcodeClassID() - illegal cast.");
945
+ }
946
+ return true;
947
+ }
948
+ return false;
949
+ }
950
+
951
+ /// Lexically casts an argument to an OpcodeClassID if it is defined. Returns true if not null.
952
+ bool arg_as_OpcodeClassID(
953
+ library::OpcodeClassID &opcode_class,
954
+ char const *name,
955
+ ProblemSpace const &problem_space,
956
+ ProblemSpace::Problem const &problem) {
957
+
958
+ size_t idx = problem_space.argument_index(name);
959
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
960
+
961
+ return arg_as_OpcodeClassID(opcode_class, value_ptr);
962
+ }
963
+
964
+
965
+ /// Lexically casts an argument to a SplitKMode if it is defined. Returns true if not null.
966
+ bool arg_as_SplitKModeID(
967
+ library::SplitKMode &split_k_mode,
968
+ KernelArgument::Value const *value_ptr) {
969
+
970
+ if (value_ptr->not_null) {
971
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
972
+
973
+ split_k_mode = library::from_string<library::SplitKMode>(
974
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element);
975
+
976
+ if (split_k_mode == library::SplitKMode::kInvalid) {
977
+ throw std::runtime_error(
978
+ "arg_as_SplitKModeID() - illegal cast.");
979
+ }
980
+ }
981
+ else {
982
+
983
+ throw std::runtime_error(
984
+ "arg_as_SplitKModeID() - illegal cast.");
985
+ }
986
+ return true;
987
+ }
988
+ return false;
989
+ }
990
+
991
+ /// Lexically casts an argument to a SplitKMode if it is defined. Returns true if not null.
992
+ bool arg_as_SplitKModeID(
993
+ library::SplitKMode &split_k_mode,
994
+ char const *name,
995
+ ProblemSpace const &problem_space,
996
+ ProblemSpace::Problem const &problem) {
997
+
998
+ size_t idx = problem_space.argument_index(name);
999
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
1000
+
1001
+ return arg_as_SplitKModeID(split_k_mode, value_ptr);
1002
+ }
1003
+
1004
+
1005
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1006
+ /// Lexically casts an argument to a ConvModeID if it is defined. Returns true if not null.
1007
+ bool arg_as_ConvModeID(
1008
+ library::ConvModeID &conv_mode,
1009
+ KernelArgument::Value const *value_ptr) {
1010
+
1011
+ if (value_ptr->not_null) {
1012
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
1013
+
1014
+ conv_mode = library::from_string<library::ConvModeID>(
1015
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element);
1016
+
1017
+ if (conv_mode == library::ConvModeID::kInvalid) {
1018
+ throw std::runtime_error(
1019
+ "arg_as_ConvModeID() - illegal cast.");
1020
+ }
1021
+ }
1022
+ else {
1023
+
1024
+ throw std::runtime_error(
1025
+ "arg_as_ConvModeID() - illegal cast.");
1026
+ }
1027
+ return true;
1028
+ }
1029
+ return false;
1030
+ }
1031
+
1032
+ /// Lexically casts an argument to a ConvModeID if it is defined. Returns true if not null.
1033
+ bool arg_as_ConvModeID(
1034
+ library::ConvModeID &conv_mode,
1035
+ char const *name,
1036
+ ProblemSpace const &problem_space,
1037
+ ProblemSpace::Problem const &problem) {
1038
+
1039
+ size_t idx = problem_space.argument_index(name);
1040
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
1041
+
1042
+ return arg_as_ConvModeID(conv_mode, value_ptr);
1043
+ }
1044
+
1045
+ /// Lexically casts an argument to a Provider if it is defined. Returns true if not null.
1046
+ bool arg_as_ProviderID(
1047
+ library::Provider &provider,
1048
+ KernelArgument::Value const *value_ptr) {
1049
+
1050
+ if (value_ptr->not_null) {
1051
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
1052
+
1053
+ provider = library::from_string<library::Provider>(
1054
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element);
1055
+
1056
+ if (provider == library::Provider::kInvalid) {
1057
+ throw std::runtime_error(
1058
+ "arg_as_ProviderID() - illegal cast.");
1059
+ }
1060
+ }
1061
+ else {
1062
+
1063
+ throw std::runtime_error(
1064
+ "arg_as_ProviderID() - illegal cast.");
1065
+ }
1066
+ return true;
1067
+ }
1068
+ return false;
1069
+ }
1070
+
1071
+ /// Lexically casts an argument to a Provider if it is defined. Returns true if not null.
1072
+ bool arg_as_ProviderID(
1073
+ library::Provider &provider,
1074
+ char const *name,
1075
+ ProblemSpace const &problem_space,
1076
+ ProblemSpace::Problem const &problem) {
1077
+
1078
+ size_t idx = problem_space.argument_index(name);
1079
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
1080
+
1081
+ return arg_as_ProviderID(provider, value_ptr);
1082
+ }
1083
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1084
+
1085
+ /// Lexically casts an argument to a given type stored in a byte array. Returns true if not null.
1086
+ bool arg_as_scalar(
1087
+ std::vector<uint8_t> &bytes,
1088
+ library::NumericTypeID numeric_type,
1089
+ KernelArgument::Value const *value_ptr) {
1090
+
1091
+ if (value_ptr->not_null) {
1092
+ if (value_ptr->argument->description->type == ArgumentTypeID::kInteger) {
1093
+ int64_t int_value = static_cast<IntegerArgument::IntegerValue const *>(value_ptr)->value;
1094
+
1095
+ // TODO - convert int64_t => destination type
1096
+ }
1097
+ else if (value_ptr->argument->description->type == ArgumentTypeID::kScalar) {
1098
+ std::string const &str_value = static_cast<ScalarArgument::ScalarValue const *>(value_ptr)->value;
1099
+
1100
+ return lexical_cast(bytes, numeric_type, str_value);
1101
+ }
1102
+ else {
1103
+ throw std::runtime_error(
1104
+ "arg_as_int() - illegal cast. Problem space argument must be integer or scalar");
1105
+ }
1106
+
1107
+ return true;
1108
+ }
1109
+
1110
+ return false;
1111
+ }
1112
+
1113
+ /// Lexically casts an argument to a given type and returns a byte array
1114
+ bool arg_as_scalar(
1115
+ std::vector<uint8_t> &bytes,
1116
+ library::NumericTypeID numeric_type,
1117
+ char const *name,
1118
+ ProblemSpace const &problem_space,
1119
+ ProblemSpace::Problem const &problem) {
1120
+
1121
+ size_t idx = problem_space.argument_index(name);
1122
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
1123
+
1124
+ return arg_as_scalar(bytes, numeric_type, value_ptr);
1125
+ }
1126
+
1127
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1128
+
1129
+ /// Returns true if a tensor description satisfies a `tensor` value
1130
+ bool tensor_description_satisfies(
1131
+ library::TensorDescription const &tensor_desc,
1132
+ TensorArgument::TensorValue const *value_ptr) {
1133
+
1134
+ if (value_ptr->not_null) {
1135
+ if (value_ptr->desc.element != library::NumericTypeID::kUnknown &&
1136
+ value_ptr->desc.element != tensor_desc.element) {
1137
+
1138
+ return false;
1139
+ }
1140
+
1141
+ if (value_ptr->desc.layout != library::LayoutTypeID::kUnknown &&
1142
+ value_ptr->desc.layout != tensor_desc.layout) {
1143
+
1144
+ return false;
1145
+ }
1146
+ }
1147
+
1148
+ return true;
1149
+ }
1150
+
1151
+ /// Returns true if a tensor description satisfies a `tensor` value
1152
+ bool tensor_description_satisfies(
1153
+ library::TensorDescription const &tensor_desc,
1154
+ char const *name,
1155
+ ProblemSpace const &problem_space,
1156
+ ProblemSpace::Problem const &problem) {
1157
+
1158
+ size_t idx = problem_space.argument_index(name);
1159
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
1160
+
1161
+ if (value_ptr->argument->description->type == ArgumentTypeID::kTensor) {
1162
+ return tensor_description_satisfies(
1163
+ tensor_desc,
1164
+ static_cast<TensorArgument::TensorValue const *>(value_ptr));
1165
+ }
1166
+ else {
1167
+ throw std::runtime_error("Kernel argument mismatch");
1168
+ }
1169
+
1170
+ return false;
1171
+ }
1172
+
1173
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1174
+
1175
+ /// Returns true if conv_kind satisfies the value
1176
+ bool conv_kind_satisfies(
1177
+ library::ConvKind const &conv_kind,
1178
+ EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr) {
1179
+
1180
+ if (value_ptr->not_null) {
1181
+ library::ConvKind conv_kind_cmd_line =
1182
+ library::from_string<library::ConvKind>(value_ptr->element);
1183
+
1184
+ if (conv_kind_cmd_line != library::ConvKind::kUnknown &&
1185
+ conv_kind_cmd_line != conv_kind) {
1186
+
1187
+ return false;
1188
+ }
1189
+ }
1190
+
1191
+ return true;
1192
+ }
1193
+
1194
+ /// Returns true if conv_kind satisfies the value
1195
+ bool conv_kind_satisfies(
1196
+ library::ConvKind const &conv_kind,
1197
+ char const *name,
1198
+ ProblemSpace const &problem_space,
1199
+ ProblemSpace::Problem const &problem) {
1200
+
1201
+ size_t idx = problem_space.argument_index(name);
1202
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
1203
+
1204
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
1205
+ return conv_kind_satisfies(
1206
+ conv_kind,
1207
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr));
1208
+ }
1209
+ else {
1210
+ throw std::runtime_error("Kernel argument mismatch");
1211
+ }
1212
+
1213
+ return false;
1214
+ }
1215
+
1216
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1217
+
1218
+ /// Returns true if a iterator algorithm satisfies the value
1219
+ bool iterator_algorithm_satisfies(
1220
+ library::IteratorAlgorithmID const &iterator_algorithm,
1221
+ EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr) {
1222
+
1223
+ if (value_ptr->not_null) {
1224
+ library::IteratorAlgorithmID iterator_algorithm_cmd_line =
1225
+ library::from_string<library::IteratorAlgorithmID>(value_ptr->element);
1226
+
1227
+ if (iterator_algorithm_cmd_line != library::IteratorAlgorithmID::kNone &&
1228
+ iterator_algorithm_cmd_line != iterator_algorithm) {
1229
+
1230
+ return false;
1231
+ }
1232
+ }
1233
+
1234
+ return true;
1235
+ }
1236
+
1237
+ /// Returns true if a iterator algorithm satisfies the value
1238
+ bool iterator_algorithm_satisfies(
1239
+ library::IteratorAlgorithmID const &iterator_algorithm,
1240
+ char const *name,
1241
+ ProblemSpace const &problem_space,
1242
+ ProblemSpace::Problem const &problem) {
1243
+
1244
+ size_t idx = problem_space.argument_index(name);
1245
+ KernelArgument::Value const *value_ptr = problem.at(idx).get();
1246
+
1247
+ if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) {
1248
+ return iterator_algorithm_satisfies(
1249
+ iterator_algorithm,
1250
+ static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr));
1251
+ }
1252
+ else {
1253
+ throw std::runtime_error("Kernel argument mismatch");
1254
+ }
1255
+
1256
+ return false;
1257
+ }
1258
+
1259
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1260
+ } // namespace profiler
1261
+ } // namespace cutlass
1262
+
1263
+ /////////////////////////////////////////////////////////////////////////////////////////////////
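The helpers above all share one pattern: look up the named argument in the ProblemSpace, lexically cast its value, and return false when the argument was not supplied so the caller can fall back to a default. A minimal caller-side sketch of that pattern follows; the argument names and default values here are illustrative assumptions, not taken from this commit.

// Hypothetical sketch of consuming the arg_as_* helpers defined above.
int64_t n = 0;
if (!arg_as_int(n, "n", problem_space, problem)) {
  n = 1024;  // assumed default when the argument is undefined
}

library::RasterOrder raster_order = library::RasterOrder::kHeuristic;  // assumed fallback value
arg_as_RasterOrder(raster_order, "raster_order", problem_space, problem);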
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/rank_2k_operation_profiler.cu ADDED
@@ -0,0 +1,752 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+
34
+
35
+ */
36
+
37
+ #include <iostream>
38
+ #include <stdexcept>
39
+ #include <iomanip>
40
+ #include <ios>
41
+
42
+ #include "cutlass/core_io.h"
43
+
44
+ #include "cutlass/profiler/cublas_helpers.h"
45
+ #include "cutlass/profiler/rank_2k_operation_profiler.h"
46
+ #include "cutlass/profiler/gpu_timer.h"
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace cutlass {
51
+ namespace profiler {
52
+
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ /// Ctor
57
+ Rank2KOperationProfiler::Rank2KOperationProfiler(Options const &options):
58
+ OperationProfiler(
59
+ options,
60
+ library::OperationKind::kRank2K,
61
+ {
62
+ {ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"},
63
+ {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"},
64
+ {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"},
65
+ {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
66
+ {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
67
+ {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
68
+ {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"},
69
+ {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"},
70
+ {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
71
+ {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
72
+ {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
73
+ {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"},
74
+ },
75
+ { library::Provider::kCUBLAS}
76
+ ) {
77
+ description_ = " Rank 2k Update. D = alpha * (A*B^T + B*A^T) + beta * C (symmetric) or D = alpha * (A*B^H+B*A^H) + beta * C (hermitian)";
78
+ }
79
+
80
+ /// Destructor
81
+ Rank2KOperationProfiler::~Rank2KOperationProfiler() {
82
+
83
+ }
84
+
85
+ /// Prints usage statement for the math function
86
+ void Rank2KOperationProfiler::print_usage(std::ostream &out) const {
87
+ out << "RankK" << "\n\n";
88
+
89
+ OperationProfiler::print_usage(out);
90
+ }
91
+
92
+ /// Prints examples
93
+ void Rank2KOperationProfiler::print_examples(std::ostream &out) const {
94
+
95
+ out << "\nExamples:\n\n"
96
+ << "Profile a particular problem size Syrk kernel:\n"
97
+ << " $ cutlass_profiler --operation=rank_2k --blas_mode=symmetric --n=1024 --k=128\n\n"
98
+
99
+ << "Profile a particular problem size Herk kernel:\n"
100
+ << " $ cutlass_profiler --operation=rank_2k --blas_mode=hermitian --n=1024 --k=128\n\n"
101
+
102
+ << "Schmoo over problem size and beta:\n"
103
+ << " $ cutlass_profiler --operation=rank_2k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
104
+
105
+ << "Schmoo over accumulator types:\n"
106
+ << " $ cutlass_profiler --operation=rank_2k --accumulator-type=f16,f32\n\n"
107
+
108
+ << "Schmoo over fill modees:\n"
109
+ << " $ cutlass_profiler --operation=rank_2k --fill_mode=lower/upper\n\n"
110
+
111
+ << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
112
+ << " $ cutlass_profiler --operation=rank_2k --A=f16:column or --A=*:row\n\n"
113
+
114
+ << "Using various input value distribution:\n"
115
+ << " $ cutlass_profiler --operation=rank_2k --dist=uniform,min:0,max:3\n"
116
+ << " $ cutlass_profiler --operation=rank_2k --dist=gaussian,mean:0,stddev:3\n"
117
+ << " $ cutlass_profiler --operation=rank_2k --dist=sequential,start:0,delta:1\n\n"
118
+
119
+ << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
120
+ << " $ cutlass_profiler --operation=rank_2k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
121
+
122
+ << "Test your changes to rank_2k kernels with a quick functional test and save results in functional-test.csv:\n"
123
+ << " $ cutlass_profiler --operation=rank_2k \\ \n"
124
+ << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
125
+ << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
126
+ << " --beta=0,1,2 --profiling-iterations=1 \\ \n"
127
+ << " --providers=cutlass --output=functional-test.csv\n\n";
128
+ }
129
+
130
+ /////////////////////////////////////////////////////////////////////////////////////////////////
131
+
132
+ #if 0
133
+ // used this for debugging
134
+ static std::string byte_string(std::vector<uint8_t> const &bytes) {
135
+ std::stringstream ss;
136
+
137
+ ss << "0x";
138
+
139
+ for (size_t idx = bytes.size(); idx > 0; --idx) {
140
+ ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
141
+ }
142
+
143
+ return ss.str();
144
+ }
145
+ #endif
146
+
147
+ Status Rank2KOperationProfiler::RankKProblem::parse(
148
+ library::RankKDescription const &operation_desc,
149
+ ProblemSpace const &problem_space,
150
+ ProblemSpace::Problem const &problem) {
151
+
152
+ if (!arg_as_int(this->n, "n", problem_space, problem)) {
153
+ // default value
154
+ this->n = 1024;
155
+ }
156
+
157
+ if (!arg_as_int(this->k, "k", problem_space, problem)) {
158
+ // default value
159
+ this->k = 1024;
160
+ }
161
+
162
+ if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
163
+ // default value
164
+ this->split_k_slices = 1;
165
+ }
166
+
167
+ if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
168
+ // default value
169
+ this->batch_count = 1;
170
+ }
171
+
172
+ if (this->split_k_slices > 1 && this->batch_count > 1) {
173
+ // At least one of these must be one
174
+ return Status::kErrorInvalidProblem;
175
+ }
176
+
177
+ if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
178
+ return Status::kErrorInvalidProblem;
179
+ }
180
+
181
+ if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
182
+ return Status::kErrorInvalidProblem;
183
+ }
184
+
185
+ if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
186
+ return Status::kErrorInvalidProblem;
187
+ }
188
+
189
+ if (!arg_as_scalar(
190
+ this->alpha,
191
+ operation_desc.element_epilogue,
192
+ "alpha",
193
+ problem_space,
194
+ problem)) {
195
+
196
+ if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
197
+ return Status::kErrorInternal;
198
+ }
199
+ }
200
+
201
+ if (!arg_as_scalar(
202
+ this->beta,
203
+ operation_desc.element_epilogue,
204
+ "beta",
205
+ problem_space,
206
+ problem)) {
207
+
208
+ if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
209
+ return Status::kErrorInternal;
210
+ }
211
+ }
212
+
213
+ this->lda = DeviceAllocation::get_packed_layout(
214
+ operation_desc.A.layout, {int(this->n), int(this->k)}).front();
215
+
216
+ this->ldb = DeviceAllocation::get_packed_layout(
217
+ operation_desc.B.layout, {int(this->n), int(this->k)}).front();
218
+
219
+ this->ldc = DeviceAllocation::get_packed_layout(
220
+ operation_desc.C.layout, {int(this->n), int(this->n)}).front();
221
+
222
+ return Status::kSuccess;
223
+ }
224
+
225
+ /// Total number of bytes loaded
226
+ int64_t Rank2KOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const {
227
+ // Input bytes read and Output bytes written for the gemm problem
228
+ int64_t bytes =
229
+ 2 * int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
230
+ 2 * int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) * k +
231
+ // Half matrix including the diagonal will have (N*(N+1))/2 elements
232
+ int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
233
+
234
+ // Set is_beta_zero true if beta is zero
235
+ bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
236
+
237
+ // Output bytes read for the gemm problem for non-zero beta values
238
+ if (!is_beta_zero) {
239
+ bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
240
+ }
241
+
242
+ bytes *= batch_count;
243
+
244
+ return bytes;
245
+ }
246
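  // Editorial worked example (not part of the original source), assuming an
  // f32 problem with n = 1024, k = 128, batch_count = 1, and beta != 0:
  //   A read:   2 * (4 * 1024) * 128     = 1,048,576 bytes
  //   B read:   2 * (4 * 1024) * 128     = 1,048,576 bytes
  //   C read:   (4 * 1024) * 1025 / 2    = 2,099,200 bytes   (only when beta != 0)
  //   D write:  (4 * 1024) * 1025 / 2    = 2,099,200 bytes
  // for a total of roughly 6.3 MB moved per batch.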
+
247
+ /// Total number of flops computed
248
+ int64_t Rank2KOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const {
249
+
250
+ // FLOPs = 2 * n(n+1)k/2 [mma1] + 2 * n(n+1)k/2 [mma2] + 2 * n(n+1)/2 [epilogue]
251
+ // FLOPs = n(n+1)(2k + 1)
252
+ int64_t flops_ = n * (n + 1) * (2*k + 1);
253
+
254
+ // complex-valued support
255
+ switch (operation_desc.tile_description.math_instruction.math_operation) {
256
+ case library::MathOperationID::kMultiplyAddComplex:
257
+ flops_ *= 4;
258
+ break;
259
+
260
+ case library::MathOperationID::kMultiplyAddComplexFastF32:
261
+ flops_ *= 4;
262
+ break;
263
+
264
+ case library::MathOperationID::kMultiplyAddGaussianComplex:
265
+ flops_ *= 3;
266
+ break;
267
+
268
+ default: break;
269
+ }
270
+
271
+ return flops_;
272
+ }
273
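  // Editorial worked example (not part of the original source): with
  // n = 1024 and k = 128, n*(n+1)*(2k+1) = 1024 * 1025 * 257 ≈ 2.7e8
  // real-valued FLOPs; the complex multiply-add cases above scale this by
  // 4x (or 3x for the Gaussian-complex algorithm).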
+
274
+ /// Initializes a performance result
275
+ void Rank2KOperationProfiler::RankKProblem::initialize_result(
276
+ PerformanceResult &result,
277
+ library::RankKDescription const &operation_desc,
278
+ ProblemSpace const &problem_space) {
279
+
280
+ result.arguments.resize(problem_space.rank());
281
+
282
+ set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind));
283
+
284
+ set_argument(result, "A", problem_space,
285
+ std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
286
+
287
+ set_argument(result, "B", problem_space,
288
+ std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
289
+
290
+ set_argument(result, "C", problem_space,
291
+ std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
292
+
293
+ set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
294
+
295
+ set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
296
+
297
+ set_argument(result, "n", problem_space, n);
298
+ set_argument(result, "k", problem_space, k);
299
+
300
+ set_argument(result, "split_k_slices", problem_space, split_k_slices);
301
+ set_argument(result, "batch_count", problem_space, batch_count);
302
+
303
+ set_argument(result, "alpha", problem_space,
304
+ library::lexical_cast(alpha, operation_desc.element_epilogue));
305
+
306
+ set_argument(result, "beta", problem_space,
307
+ library::lexical_cast(beta, operation_desc.element_epilogue));
308
+ }
309
+
310
+ /////////////////////////////////////////////////////////////////////////////////////////////////
311
+
312
+ /// Extracts the problem dimensions
313
+ Status Rank2KOperationProfiler::initialize_configuration(
314
+ Options const &options,
315
+ PerformanceReport &report,
316
+ DeviceContext &device_context,
317
+ library::Operation const *operation,
318
+ ProblemSpace const &problem_space,
319
+ ProblemSpace::Problem const &problem) {
320
+
321
+ library::RankKDescription const &operation_desc =
322
+ static_cast<library::RankKDescription const &>(operation->description());
323
+
324
+ if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) {
325
+ return Status::kErrorInvalidProblem;
326
+ }
327
+
328
+ Status status = problem_.parse(operation_desc, problem_space, problem);
329
+
330
+ if (status != Status::kSuccess) {
331
+ return status;
332
+ }
333
+
334
+ rank_k_workspace_.configuration.problem_size.m() = int(problem_.n);
335
+ rank_k_workspace_.configuration.problem_size.n() = int(problem_.n);
336
+ rank_k_workspace_.configuration.problem_size.k() = int(problem_.k);
337
+ rank_k_workspace_.configuration.lda = problem_.lda;
338
+ rank_k_workspace_.configuration.ldb = problem_.ldb;
339
+ rank_k_workspace_.configuration.ldc = problem_.ldc;
340
+ rank_k_workspace_.configuration.ldd = problem_.ldc;
341
+ //rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
342
+ rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices);
343
+
344
+ rank_k_workspace_.arguments.A = nullptr;
345
+ rank_k_workspace_.arguments.B = nullptr;
346
+ rank_k_workspace_.arguments.C = nullptr;
347
+ rank_k_workspace_.arguments.D = nullptr;
348
+ rank_k_workspace_.arguments.alpha = problem_.alpha.data();
349
+ rank_k_workspace_.arguments.beta = problem_.beta.data();
350
+ rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
351
+
352
+ initialize_result_(this->model_result_, options, operation_desc, problem_space);
353
+
354
+ return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments);
355
+ }
356
+
357
+ /// Initializes the performance result
358
+ void Rank2KOperationProfiler::initialize_result_(
359
+ PerformanceResult &result,
360
+ Options const &options,
361
+ library::RankKDescription const &operation_desc,
362
+ ProblemSpace const &problem_space) {
363
+
364
+ result.provider = library::Provider::kCUTLASS;
365
+ result.disposition = Disposition::kNotRun;
366
+ result.status = Status::kSuccess;
367
+ result.operation_name = operation_desc.name;
368
+
369
+ problem_.initialize_result(result, operation_desc, problem_space);
370
+
371
+ OperationProfiler::initialize_result_(result, operation_desc, problem_space);
372
+
373
+
374
+ result.bytes = problem_.bytes(operation_desc);
375
+ result.flops = problem_.flops(operation_desc);
376
+ result.runtime = 0;
377
+
378
+
379
+ }
380
+
381
+ /// Initializes workspace
382
+ Status Rank2KOperationProfiler::initialize_workspace(
383
+ Options const &options,
384
+ PerformanceReport &report,
385
+ DeviceContext &device_context,
386
+ library::Operation const *operation,
387
+ ProblemSpace const &problem_space,
388
+ ProblemSpace::Problem const &problem) {
389
+
390
+ if (options.device.devices.size() != 1) {
391
+ throw std::runtime_error("This operation profiler only supports a single "
392
+ "device.");
393
+ }
394
+
395
+ cudaError_t result;
396
+ result = cudaSetDevice(options.device.device_id(0));
397
+ if (result != cudaSuccess) {
398
+ throw std::runtime_error("cudaSetDevice() failed.");
399
+ }
400
+
401
+ library::RankKDescription const &operation_desc =
402
+ static_cast<library::RankKDescription const &>(operation->description());
403
+
404
+ if (options.execution_mode != ExecutionMode::kDryRun) {
405
+ int seed_shift = 0;
406
+ rank_k_workspace_.A = device_context.allocate_and_initialize_tensor(
407
+ options,
408
+ "A",
409
+ operation_desc.A.element,
410
+ operation_desc.A.layout,
411
+ {int(problem_.n), int(problem_.k)},
412
+ {int(problem_.lda)},
413
+ 1, // batch_count
414
+ seed_shift++,
415
+ 0 // device_index
416
+ );
417
+
418
+ rank_k_workspace_.B = device_context.allocate_and_initialize_tensor(
419
+ options,
420
+ "B",
421
+ operation_desc.B.element,
422
+ operation_desc.B.layout,
423
+ {int(problem_.n), int(problem_.k)},
424
+ {int(problem_.ldb)},
425
+ 1, // batch_count
426
+ seed_shift++,
427
+ 0 // device_index
428
+ );
429
+
430
+ rank_k_workspace_.C = device_context.allocate_and_initialize_tensor(
431
+ options,
432
+ "C",
433
+ operation_desc.C.element,
434
+ operation_desc.C.layout,
435
+ {int(problem_.n), int(problem_.n)},
436
+ {int(problem_.ldc)},
437
+ 1, // batch_count
438
+ seed_shift++,
439
+ 0 // device_index
440
+ );
441
+
442
+ rank_k_workspace_.Computed = device_context.allocate_tensor(
443
+ options,
444
+ "D",
445
+ operation_desc.C.element,
446
+ operation_desc.C.layout,
447
+ {int(problem_.n), int(problem_.n)},
448
+ {int(problem_.ldc)},
449
+ 1, // batch_count
450
+ 0 // device_index
451
+ );
452
+
453
+ rank_k_workspace_.Reference = device_context.allocate_tensor(
454
+ options,
455
+ "Reference",
456
+ operation_desc.C.element,
457
+ operation_desc.C.layout,
458
+ {int(problem_.n), int(problem_.n)},
459
+ {int(problem_.ldc)},
460
+ 1, // batch_count
461
+ 0 // device_index
462
+ );
463
+
464
+ rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data());
465
+ rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data());
466
+ }
467
+
468
+
469
+ //
470
+ // Initialize the CUTLASS operation
471
+ //
472
+ Status status = Status::kSuccess;
473
+
474
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
475
+
476
+ if (options.execution_mode != ExecutionMode::kDryRun) {
477
+
478
+ uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration);
479
+ rank_k_workspace_.host_workspace.resize(workspace_size, 0);
480
+
481
+ workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration);
482
+ rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
483
+
484
+ status = operation->initialize(
485
+ &rank_k_workspace_.configuration,
486
+ rank_k_workspace_.host_workspace.data(),
487
+ rank_k_workspace_.device_workspace.data());
488
+ }
489
+
490
+ //
491
+ // If CUTLASS is enabled, generate a result for it
492
+ //
493
+ results_.push_back(model_result_);
494
+ results_.back().provider = library::Provider::kCUTLASS;
495
+ results_.back().op_kind = library::OperationKind::kRank2K;
496
+ results_.back().disposition = Disposition::kNotRun;
497
+
498
+ for(auto provider : verification_providers_) {
499
+ results_.back().verification_map[provider] = Disposition::kNotRun;
500
+ }
501
+ }
502
+
503
+ return status;
504
+ }
505
+
506
+ /////////////////////////////////////////////////////////////////////////////////////////////////
507
+
508
+ /// Verifies CUTLASS against references
509
+ bool Rank2KOperationProfiler::verify_cutlass(
510
+ Options const &options,
511
+ PerformanceReport &report,
512
+ DeviceContext &device_context,
513
+ library::Operation const *operation,
514
+ ProblemSpace const &problem_space,
515
+ ProblemSpace::Problem const &problem) {
516
+
517
+ if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
518
+ return true;
519
+ }
520
+
521
+ if (options.execution_mode == ExecutionMode::kDryRun) {
522
+ return true;
523
+ }
524
+
525
+ // Initialize structure containing RankK arguments
526
+ rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
527
+ rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
528
+ rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
529
+ rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
530
+ rank_k_workspace_.arguments.alpha = problem_.alpha.data();
531
+ rank_k_workspace_.arguments.beta = problem_.beta.data();
532
+ rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
533
+
534
+ //
535
+ // Run the CUTLASS operation
536
+ //
537
+
538
+ results_.back().status = operation->run(
539
+ &rank_k_workspace_.arguments,
540
+ rank_k_workspace_.host_workspace.data(),
541
+ rank_k_workspace_.device_workspace.data());
542
+
543
+ if (results_.back().status != Status::kSuccess) {
544
+ results_.back().disposition = Disposition::kFailed;
545
+ return false;
546
+ }
547
+
548
+ cudaError_t result = cudaDeviceSynchronize();
549
+ if (result != cudaSuccess) {
550
+ results_.back().disposition = Disposition::kFailed;
551
+ return false;
552
+ }
553
+
554
+ // CUTLASS op ran but has not yet been verified against any verification provider
555
+ results_.back().disposition = Disposition::kNotVerified;
556
+
557
+ //
558
+ // Run verification providers
559
+ //
560
+
561
+ if (options.verification.enabled) {
562
+
563
+ #if CUTLASS_ENABLE_CUBLAS
564
+ if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
565
+
566
+ // Guard against unsupported cases
567
+ auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description());
568
+
569
+ if (cublas_satisfies(rank_k_desc) == Status::kSuccess) {
570
+
571
+ // call cublas verification if supported
572
+ verify_with_cublas_(
573
+ options,
574
+ report,
575
+ device_context,
576
+ operation,
577
+ problem_space,
578
+ problem);
579
+ }
580
+
581
+ else {
582
+ // set verification map for cublas to not supported
583
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
584
+ }
585
+ }
586
+ #endif // #if CUTLASS_ENABLE_CUBLAS
587
+
588
+ // Update disposition to worst case verification outcome among all
589
+ // verification providers which are supported
590
+ bool is_any_verification_run_passed = false;
591
+ for(auto &m : results_.back().verification_map) {
592
+ if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
593
+ results_.back().disposition = m.second;
594
+ return true;
595
+ }
596
+ if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
597
+ is_any_verification_run_passed = true;
598
+ }
599
+ }
600
+
601
+ if(is_any_verification_run_passed) {
602
+ results_.back().disposition = Disposition::kPassed;
603
+ }
604
+ }
605
+
606
+ // Return true means continue profiling
607
+ return true;
608
+ }
609
+
610
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
611
+
612
+ /// Verifies CUTLASS against references
613
+ bool Rank2KOperationProfiler::verify_with_cublas_(
614
+ Options const &options,
615
+ PerformanceReport &report,
616
+ DeviceContext &device_context,
617
+ library::Operation const *operation,
618
+ ProblemSpace const &problem_space,
619
+ ProblemSpace::Problem const &problem) {
620
+
621
+
622
+ #if CUTLASS_ENABLE_CUBLAS
623
+
624
+ library::RankKDescription const &rank_k_desc =
625
+ static_cast<library::RankKDescription const &>(operation->description());
626
+
627
+ //
628
+ // Construct cuBLAS operators
629
+ //
630
+
631
+ CublasCreate handle;
632
+ cublasStatus_t status = handle.get_cublas_create_status();
633
+
634
+ if (status != CUBLAS_STATUS_SUCCESS) {
635
+
636
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
637
+ return true;
638
+ }
639
+
640
+ //
641
+ // Initialize state
642
+ //
643
+
644
+ try {
645
+
646
+ //
647
+ // Construct dispatcher to cublas<t>Syr2k()
648
+ //
649
+
650
+ // Initialize structure containing RankK arguments
651
+ rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
652
+ rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
653
+ rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data();
654
+ rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data();
655
+ rank_k_workspace_.arguments.alpha = problem_.alpha.data();
656
+ rank_k_workspace_.arguments.beta = problem_.beta.data();
657
+ rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
658
+
659
+ detail::cublasRankKDispatcher rank_k_op(
660
+ rank_k_desc,
661
+ rank_k_workspace_.configuration,
662
+ rank_k_workspace_.arguments
663
+ );
664
+
665
+ if (rank_k_op.status != Status::kSuccess) {
666
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
667
+ return true;
668
+ }
669
+
670
+ results_.back().status = Status::kSuccess;
671
+
672
+ status = rank_k_op(handle);
673
+
674
+ // Handle errors
675
+ if (status != CUBLAS_STATUS_SUCCESS) {
676
+
677
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
678
+ return true;
679
+ }
680
+
681
+ //
682
+ // Verify results
683
+ //
684
+
685
+ results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
686
+ options,
687
+ *rank_k_workspace_.Computed,
688
+ *rank_k_workspace_.Reference
689
+ );
690
+
691
+ // Save workspace if incorrect
692
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
693
+ results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
694
+
695
+ save_workspace(
696
+ device_context,
697
+ options,
698
+ rank_k_desc,
699
+ library::Provider::kCUTLASS,
700
+ library::Provider::kCUBLAS);
701
+ }
702
+ }
703
+ catch (...) {
704
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
705
+ }
706
+
707
+ #endif
708
+
709
+ // Return true means continue profiling
710
+ return true;
711
+ }
712
+
713
+ /////////////////////////////////////////////////////////////////////////////////////////////////
714
+
715
+ /// Measures performance results
716
+ bool Rank2KOperationProfiler::profile(
717
+ Options const &options,
718
+ PerformanceReport &report,
719
+ DeviceContext &device_context,
720
+ library::Operation const *operation,
721
+ ProblemSpace const &problem_space,
722
+ ProblemSpace::Problem const &problem) {
723
+
724
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
725
+
726
+ // Initialize structure containing RankK arguments
727
+ rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
728
+ rank_k_workspace_.arguments.B = rank_k_workspace_.B->data();
729
+ rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
730
+ rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
731
+ rank_k_workspace_.arguments.alpha = problem_.alpha.data();
732
+ rank_k_workspace_.arguments.beta = problem_.beta.data();
733
+ rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
734
+
735
+ results_.back().status = profile_cutlass_(
736
+ results_.back().runtime,
737
+ options,
738
+ operation,
739
+ &rank_k_workspace_.arguments,
740
+ rank_k_workspace_.host_workspace.data(),
741
+ rank_k_workspace_.device_workspace.data()
742
+ );
743
+ }
744
+ return true;
745
+ }
746
+
747
+ /////////////////////////////////////////////////////////////////////////////////////////////////
748
+
749
+ } // namespace profiler
750
+ } // namespace cutlass
751
+
752
+ /////////////////////////////////////////////////////////////////////////////////////////////////
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/rank_k_operation_profiler.cu ADDED
@@ -0,0 +1,737 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+
34
+
35
+ */
36
+
37
+ #include <iostream>
38
+ #include <stdexcept>
39
+ #include <iomanip>
40
+ #include <ios>
41
+
42
+ #include "cutlass/core_io.h"
43
+
44
+ #include "cutlass/profiler/cublas_helpers.h"
45
+ #include "cutlass/profiler/rank_k_operation_profiler.h"
46
+ #include "cutlass/profiler/gpu_timer.h"
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace cutlass {
51
+ namespace profiler {
52
+
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ /// Ctor
57
+ RankKOperationProfiler::RankKOperationProfiler(Options const &options):
58
+ OperationProfiler(
59
+ options,
60
+ library::OperationKind::kRankK,
61
+ {
62
+ {ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"},
63
+ {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"},
64
+ {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"},
65
+ {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
66
+ {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
67
+ {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"},
68
+ {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"},
69
+ {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
70
+ {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
71
+ {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
72
+ {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"},
73
+ },
74
+ { library::Provider::kCUBLAS}
75
+ ) {
76
+ description_ = " Rank-k Update. D = alpha * A*A^T + beta * C (symmetric) or D = alpha * A*A^H + beta * C (hermitian)";
77
+ }
78
+
79
+ /// Destructor
80
+ RankKOperationProfiler::~RankKOperationProfiler() {
81
+
82
+ }
83
+
84
+ /// Prints usage statement for the math function
85
+ void RankKOperationProfiler::print_usage(std::ostream &out) const {
86
+ out << "RankK" << "\n\n";
87
+
88
+ OperationProfiler::print_usage(out);
89
+ }
90
+
91
+ /// Prints examples
92
+ void RankKOperationProfiler::print_examples(std::ostream &out) const {
93
+
94
+ out << "\nExamples:\n\n"
95
+ << "Profile a particular problem size Syrk kernel:\n"
96
+ << " $ cutlass_profiler --operation=rank_k --blas_mode=symmetric --n=1024 --k=128\n\n"
97
+
98
+ << "Profile a particular problem size Herk kernel:\n"
99
+ << " $ cutlass_profiler --operation=rank_k --blas_mode=hermitian --n=1024 --k=128\n\n"
100
+
101
+ << "Schmoo over problem size and beta:\n"
102
+ << " $ cutlass_profiler --operation=rank_k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
103
+
104
+ << "Schmoo over accumulator types:\n"
105
+ << " $ cutlass_profiler --operation=rank_k --accumulator-type=f16,f32\n\n"
106
+
107
+ << "Schmoo over fill modees:\n"
108
+ << " $ cutlass_profiler --operation=rank_k --fill_mode=lower/upper\n\n"
109
+
110
+ << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
111
+ << " $ cutlass_profiler --operation=rank_k --A=f16:column or --A=*:row\n\n"
112
+
113
+ << "Using various input value distribution:\n"
114
+ << " $ cutlass_profiler --operation=rank_k --dist=uniform,min:0,max:3\n"
115
+ << " $ cutlass_profiler --operation=rank_k --dist=gaussian,mean:0,stddev:3\n"
116
+ << " $ cutlass_profiler --operation=rank_k --dist=sequential,start:0,delta:1\n\n"
117
+
118
+ << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
119
+ << " $ cutlass_profiler --operation=rank_k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
120
+
121
+ << "Test your changes to rank_k kernels with a quick functional test and save results in functional-test.csv:\n"
122
+ << " $ cutlass_profiler --operation=rank_k \\ \n"
123
+ << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
124
+ << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
125
+ << " --beta=0,1,2 --profiling-iterations=1 \\ \n"
126
+ << " --providers=cutlass --output=functional-test.csv\n\n";
127
+ }
128
+
129
+ /////////////////////////////////////////////////////////////////////////////////////////////////
130
+
131
+ #if 0
132
+ // used this for debugging
133
+ static std::string byte_string(std::vector<uint8_t> const &bytes) {
134
+ std::stringstream ss;
135
+
136
+ ss << "0x";
137
+
138
+ for (size_t idx = bytes.size(); idx > 0; --idx) {
139
+ ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
140
+ }
141
+
142
+ return ss.str();
143
+ }
144
+ #endif
145
+
146
+ Status RankKOperationProfiler::RankKProblem::parse(
147
+ library::RankKDescription const &operation_desc,
148
+ ProblemSpace const &problem_space,
149
+ ProblemSpace::Problem const &problem) {
150
+
151
+ if (!arg_as_int(this->n, "n", problem_space, problem)) {
152
+ // default value
153
+ this->n = 1024;
154
+ }
155
+
156
+ if (!arg_as_int(this->k, "k", problem_space, problem)) {
157
+ // default value
158
+ this->k = 1024;
159
+ }
160
+
161
+ if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
162
+ // default value
163
+ this->split_k_slices = 1;
164
+ }
165
+
166
+ if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
167
+ // default value
168
+ this->batch_count = 1;
169
+ }
170
+
171
+ if (this->split_k_slices > 1 && this->batch_count > 1) {
172
+ // At least one of these must be one
173
+ return Status::kErrorInvalidProblem;
174
+ }
175
+
176
+ if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
177
+ return Status::kErrorInvalidProblem;
178
+ }
179
+
180
+ if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
181
+ return Status::kErrorInvalidProblem;
182
+ }
183
+
184
+ if (!arg_as_scalar(
185
+ this->alpha,
186
+ operation_desc.element_epilogue,
187
+ "alpha",
188
+ problem_space,
189
+ problem)) {
190
+
191
+ if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
192
+ return Status::kErrorInternal;
193
+ }
194
+ }
195
+
196
+ if (!arg_as_scalar(
197
+ this->beta,
198
+ operation_desc.element_epilogue,
199
+ "beta",
200
+ problem_space,
201
+ problem)) {
202
+
203
+ if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
204
+ return Status::kErrorInternal;
205
+ }
206
+ }
207
+
208
+ this->lda = DeviceAllocation::get_packed_layout(
209
+ operation_desc.A.layout, {int(this->n), int(this->k)}).front();
210
+
211
+ this->ldc = DeviceAllocation::get_packed_layout(
212
+ operation_desc.C.layout, {int(this->n), int(this->n)}).front();
213
+
214
+ return Status::kSuccess;
215
+ }
216
+
217
+ /// Total number of bytes loaded
218
+ int64_t RankKOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const {
219
+ // Input bytes read and Output bytes written for the gemm problem
220
+ int64_t bytes =
221
+ int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
222
+ int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
223
+ // Half matrix including the diagonal will have (N*(N+1))/2 elements
224
+ int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
225
+
226
+ // Set is_beta_zero true if beta is zero
227
+ bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
228
+
229
+ // Output bytes read for the gemm problem for non-zero beta values
230
+ if (!is_beta_zero) {
231
+ bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
232
+ }
233
+
234
+ bytes *= batch_count;
235
+
236
+ return bytes;
237
+ }
238
+
239
+ /// Total number of flops computed
240
+ int64_t RankKOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const {
241
+
242
+ // FLOPs = 2 * n(n+1)k/2 [mma] + 2 * n(n+1)/2 [epilogue]
243
+ // FLOPs = n(n+1)(k + 1)
244
+ int64_t flops_ = n * (n + 1) * (k + 1);
245
+
246
+ // complex-valued support
247
+ switch (operation_desc.tile_description.math_instruction.math_operation) {
248
+ case library::MathOperationID::kMultiplyAddComplex:
249
+ flops_ *= 4;
250
+ break;
251
+
252
+ case library::MathOperationID::kMultiplyAddComplexFastF32:
253
+ flops_ *= 4;
254
+ break;
255
+
256
+ case library::MathOperationID::kMultiplyAddGaussianComplex:
257
+ flops_ *= 3;
258
+ break;
259
+
260
+ default: break;
261
+ }
262
+
263
+ return flops_;
264
+ }
265
+
266
+ /// Initializes a performance result
267
+ void RankKOperationProfiler::RankKProblem::initialize_result(
268
+ PerformanceResult &result,
269
+ library::RankKDescription const &operation_desc,
270
+ ProblemSpace const &problem_space) {
271
+
272
+ result.arguments.resize(problem_space.rank());
273
+
274
+ set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind));
275
+
276
+ set_argument(result, "A", problem_space,
277
+ std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
278
+
279
+ set_argument(result, "C", problem_space,
280
+ std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
281
+
282
+ set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
283
+
284
+ set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
285
+
286
+ set_argument(result, "n", problem_space, n);
287
+ set_argument(result, "k", problem_space, k);
288
+
289
+ set_argument(result, "split_k_slices", problem_space, split_k_slices);
290
+ set_argument(result, "batch_count", problem_space, batch_count);
291
+
292
+ set_argument(result, "alpha", problem_space,
293
+ library::lexical_cast(alpha, operation_desc.element_epilogue));
294
+
295
+ set_argument(result, "beta", problem_space,
296
+ library::lexical_cast(beta, operation_desc.element_epilogue));
297
+ }
298
+
299
+ /////////////////////////////////////////////////////////////////////////////////////////////////
300
+
301
+ /// Extracts the problem dimensions
302
+ Status RankKOperationProfiler::initialize_configuration(
303
+ Options const &options,
304
+ PerformanceReport &report,
305
+ DeviceContext &device_context,
306
+ library::Operation const *operation,
307
+ ProblemSpace const &problem_space,
308
+ ProblemSpace::Problem const &problem) {
309
+
310
+ library::RankKDescription const &operation_desc =
311
+ static_cast<library::RankKDescription const &>(operation->description());
312
+
313
+ if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) {
314
+ return Status::kErrorInvalidProblem;
315
+ }
316
+
317
+ Status status = problem_.parse(operation_desc, problem_space, problem);
318
+
319
+ if (status != Status::kSuccess) {
320
+ return status;
321
+ }
322
+
323
+ rank_k_workspace_.configuration.problem_size.m() = int(problem_.n);
324
+ rank_k_workspace_.configuration.problem_size.n() = int(problem_.n);
325
+ rank_k_workspace_.configuration.problem_size.k() = int(problem_.k);
326
+ rank_k_workspace_.configuration.lda = problem_.lda;
327
+ rank_k_workspace_.configuration.ldc = problem_.ldc;
328
+ rank_k_workspace_.configuration.ldd = problem_.ldc;
329
+ //rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
330
+ rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices);
331
+
332
+ rank_k_workspace_.arguments.A = nullptr;
333
+ rank_k_workspace_.arguments.C = nullptr;
334
+ rank_k_workspace_.arguments.D = nullptr;
335
+ rank_k_workspace_.arguments.alpha = problem_.alpha.data();
336
+ rank_k_workspace_.arguments.beta = problem_.beta.data();
337
+ rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
338
+
339
+ initialize_result_(this->model_result_, options, operation_desc, problem_space);
340
+
341
+ return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments);
342
+ }
343
+
344
+ /// Initializes the performance result
345
+ void RankKOperationProfiler::initialize_result_(
346
+ PerformanceResult &result,
347
+ Options const &options,
348
+ library::RankKDescription const &operation_desc,
349
+ ProblemSpace const &problem_space) {
350
+
351
+ result.provider = library::Provider::kCUTLASS;
352
+ result.disposition = Disposition::kNotRun;
353
+ result.status = Status::kSuccess;
354
+ result.operation_name = operation_desc.name;
355
+
356
+ problem_.initialize_result(result, operation_desc, problem_space);
357
+
358
+ OperationProfiler::initialize_result_(result, operation_desc, problem_space);
359
+
360
+
361
+ result.bytes = problem_.bytes(operation_desc);
362
+ result.flops = problem_.flops(operation_desc);
363
+
364
+ result.runtime = 0;
365
+
366
+ // complex-valued support
367
+ switch (operation_desc.tile_description.math_instruction.math_operation) {
368
+ case library::MathOperationID::kMultiplyAddComplex:
369
+ result.flops *= 4;
370
+ break;
371
+
372
+ case library::MathOperationID::kMultiplyAddComplexFastF32:
373
+ result.flops *= 4;
374
+ break;
375
+
376
+ default: break;
377
+ }
378
+
379
+ }
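The 4x factors above account for the cost of complex arithmetic: one complex multiply-accumulate expands to four real multiplies and four real adds, so the real-valued FLOP estimate is scaled by 4 when the math instruction operates on complex data. A rough sanity check, assuming a GEMM-like 2*n*n*k base count purely for illustration:

// Illustrative scaling of a real-valued FLOP estimate for complex math instructions.
#include <cstdint>
#include <cstdio>

int main() {
  int64_t n = 4096, k = 1024;
  int64_t real_flops    = 2 * n * n * k;       // assumed real-valued base count
  int64_t complex_flops = real_flops * 4;      // kMultiplyAddComplex / FastF32 scaling
  std::printf("real: %lld  complex: %lld\n",
              static_cast<long long>(real_flops),
              static_cast<long long>(complex_flops));
  return 0;
}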
380
+
381
+ /// Initializes workspace
382
+ Status RankKOperationProfiler::initialize_workspace(
383
+ Options const &options,
384
+ PerformanceReport &report,
385
+ DeviceContext &device_context,
386
+ library::Operation const *operation,
387
+ ProblemSpace const &problem_space,
388
+ ProblemSpace::Problem const &problem) {
389
+
390
+ if (options.device.devices.size() != 1) {
391
+ throw std::runtime_error("This operation profiler only supports a single "
392
+ "device.");
393
+ }
394
+
395
+ cudaError_t result;
396
+ result = cudaSetDevice(options.device.device_id(0));
397
+ if (result != cudaSuccess) {
398
+ throw std::runtime_error("cudaSetDevice() failed.");
399
+ }
400
+
401
+ library::RankKDescription const &operation_desc =
402
+ static_cast<library::RankKDescription const &>(operation->description());
403
+
404
+ if (options.execution_mode != ExecutionMode::kDryRun) {
405
+ int seed_shift = 0;
406
+ rank_k_workspace_.A = device_context.allocate_and_initialize_tensor(
407
+ options,
408
+ "A",
409
+ operation_desc.A.element,
410
+ operation_desc.A.layout,
411
+ {int(problem_.n), int(problem_.k)},
412
+ {int(problem_.lda)},
413
+ 1, // batch_count
414
+ seed_shift++,
415
+ 0 // device_index
416
+ );
417
+
418
+ rank_k_workspace_.C = device_context.allocate_and_initialize_tensor(
419
+ options,
420
+ "C",
421
+ operation_desc.C.element,
422
+ operation_desc.C.layout,
423
+ {int(problem_.n), int(problem_.n)},
424
+ {int(problem_.ldc)},
425
+ 1, // batch_count
426
+ seed_shift++,
427
+ 0 // device_index
428
+ );
429
+
430
+ rank_k_workspace_.Computed = device_context.allocate_tensor(
431
+ options,
432
+ "D",
433
+ operation_desc.C.element,
434
+ operation_desc.C.layout,
435
+ {int(problem_.n), int(problem_.n)},
436
+ {int(problem_.ldc)},
437
+ 1, //batch_count
438
+ 0 // device_index
439
+ );
440
+
441
+ rank_k_workspace_.Reference = device_context.allocate_tensor(
442
+ options,
443
+ "Reference",
444
+ operation_desc.C.element,
445
+ operation_desc.C.layout,
446
+ {int(problem_.n), int(problem_.n)},
447
+ {int(problem_.ldc)},
448
+ 1, //batch_count
449
+ 0 // device_index
450
+ );
451
+
452
+ rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data());
453
+ rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data());
454
+ }
455
+
456
+
457
+ //
458
+ // Initialize the CUTLASS operation
459
+ //
460
+ Status status = Status::kSuccess;
461
+
462
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
463
+
464
+ if (options.execution_mode != ExecutionMode::kDryRun) {
465
+
466
+ uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration);
467
+ rank_k_workspace_.host_workspace.resize(workspace_size, 0);
468
+
469
+ workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration);
470
+ rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
471
+
472
+ status = operation->initialize(
473
+ &rank_k_workspace_.configuration,
474
+ rank_k_workspace_.host_workspace.data(),
475
+ rank_k_workspace_.device_workspace.data());
476
+ }
477
+
478
+ //
479
+ // If CUTLASS is enabled, generate a result for it
480
+ //
481
+ results_.push_back(model_result_);
482
+ results_.back().provider = library::Provider::kCUTLASS;
483
+ results_.back().op_kind = library::OperationKind::kRankK;
484
+ results_.back().disposition = Disposition::kNotRun;
485
+
486
+ for(auto provider : verification_providers_) {
487
+ results_.back().verification_map[provider] = Disposition::kNotRun;
488
+ }
489
+ }
490
+
491
+ return status;
492
+ }
493
+
494
+ /////////////////////////////////////////////////////////////////////////////////////////////////
495
+
496
+ /// Verifies CUTLASS against references
497
+ bool RankKOperationProfiler::verify_cutlass(
498
+ Options const &options,
499
+ PerformanceReport &report,
500
+ DeviceContext &device_context,
501
+ library::Operation const *operation,
502
+ ProblemSpace const &problem_space,
503
+ ProblemSpace::Problem const &problem) {
504
+
505
+ if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
506
+ return true;
507
+ }
508
+
509
+ if (options.execution_mode == ExecutionMode::kDryRun) {
510
+ return true;
511
+ }
512
+
513
+ // Initialize structure containing RankK arguments
514
+ rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
515
+ rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
516
+ rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
517
+ rank_k_workspace_.arguments.alpha = problem_.alpha.data();
518
+ rank_k_workspace_.arguments.beta = problem_.beta.data();
519
+ rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
520
+
521
+ //
522
+ // Run the CUTLASS operation
523
+ //
524
+
525
+ results_.back().status = operation->run(
526
+ &rank_k_workspace_.arguments,
527
+ rank_k_workspace_.host_workspace.data(),
528
+ rank_k_workspace_.device_workspace.data());
529
+
530
+ if (results_.back().status != Status::kSuccess) {
531
+ results_.back().disposition = Disposition::kFailed;
532
+ return false;
533
+ }
534
+
535
+ cudaError_t result = cudaDeviceSynchronize();
536
+ if (result != cudaSuccess) {
537
+ results_.back().disposition = Disposition::kFailed;
538
+ return false;
539
+ }
540
+
541
+ // CUTLASS op ran but has not yet been verified against any verification provider
542
+ results_.back().disposition = Disposition::kNotVerified;
543
+
544
+ //
545
+ // Run verification providers
546
+ //
547
+
548
+ if (options.verification.enabled) {
549
+
550
+ #if CUTLASS_ENABLE_CUBLAS
551
+ if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
552
+
553
+ // Guard against unsupported cases
554
+ auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description());
555
+
556
+ if (cublas_satisfies(rank_k_desc) == Status::kSuccess) {
557
+
558
+ // call cublas verification if supported
559
+ verify_with_cublas_(
560
+ options,
561
+ report,
562
+ device_context,
563
+ operation,
564
+ problem_space,
565
+ problem);
566
+ }
567
+
568
+ else {
569
+ // set verification map for cublas to not supported
570
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
571
+ }
572
+ }
573
+ #endif // #if CUTLASS_ENABLE_CUBLAS
574
+
575
+ // Update disposition to worst case verification outcome among all
576
+ // verification providers which are supported
577
+ bool is_any_verification_run_passed = false;
578
+ for(auto &m : results_.back().verification_map) {
579
+ if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
580
+ results_.back().disposition = m.second;
581
+ return true;
582
+ }
583
+ if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
584
+ is_any_verification_run_passed = true;
585
+ }
586
+ }
587
+
588
+ if(is_any_verification_run_passed) {
589
+ results_.back().disposition = Disposition::kPassed;
590
+ }
591
+ }
592
+
593
+ // Returning true means profiling should continue
594
+ return true;
595
+ }
596
+
597
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
598
+
599
+ /// Verifies CUTLASS against references
600
+ bool RankKOperationProfiler::verify_with_cublas_(
601
+ Options const &options,
602
+ PerformanceReport &report,
603
+ DeviceContext &device_context,
604
+ library::Operation const *operation,
605
+ ProblemSpace const &problem_space,
606
+ ProblemSpace::Problem const &problem) {
607
+
608
+
609
+ #if CUTLASS_ENABLE_CUBLAS
610
+
611
+ library::RankKDescription const &rank_k_desc =
612
+ static_cast<library::RankKDescription const &>(operation->description());
613
+
614
+ //
615
+ // Construct cuBLAS operators
616
+ //
617
+
618
+ CublasCreate handle;
619
+ cublasStatus_t status = handle.get_cublas_create_status();
620
+
621
+ if (status != CUBLAS_STATUS_SUCCESS) {
622
+
623
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
624
+ return true;
625
+ }
626
+
627
+ //
628
+ // Initialize state
629
+ //
630
+
631
+ try {
632
+
633
+ //
634
+ // Construct dispatcher to cublas<t>Syrk()
635
+ //
636
+
637
+ // Initialize structure containing RankK arguments
638
+ rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
639
+ rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data();
640
+ rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data();
641
+ rank_k_workspace_.arguments.alpha = problem_.alpha.data();
642
+ rank_k_workspace_.arguments.beta = problem_.beta.data();
643
+ rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
644
+
645
+ detail::cublasRankKDispatcher rank_k_op(
646
+ rank_k_desc,
647
+ rank_k_workspace_.configuration,
648
+ rank_k_workspace_.arguments
649
+ );
650
+
651
+ if (rank_k_op.status != Status::kSuccess) {
652
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
653
+ return true;
654
+ }
655
+
656
+ results_.back().status = Status::kSuccess;
657
+
658
+ status = rank_k_op(handle);
659
+
660
+ // Handle errors
661
+ if (status != CUBLAS_STATUS_SUCCESS) {
662
+
663
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
664
+ return true;
665
+ }
666
+
667
+ //
668
+ // Verify results
669
+ //
670
+
671
+ results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
672
+ options,
673
+ *rank_k_workspace_.Computed,
674
+ *rank_k_workspace_.Reference
675
+ );
676
+
677
+ // Save workspace if incorrect
678
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
679
+ results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
680
+
681
+ save_workspace(
682
+ device_context,
683
+ options,
684
+ rank_k_desc,
685
+ library::Provider::kCUTLASS,
686
+ library::Provider::kCUBLAS);
687
+ }
688
+ }
689
+ catch (...) {
690
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
691
+ }
692
+
693
+ #endif
694
+
695
+ // Returning true means profiling should continue
696
+ return true;
697
+ }
698
+
699
+ /////////////////////////////////////////////////////////////////////////////////////////////////
700
+
701
+ /// Measures performance results
702
+ bool RankKOperationProfiler::profile(
703
+ Options const &options,
704
+ PerformanceReport &report,
705
+ DeviceContext &device_context,
706
+ library::Operation const *operation,
707
+ ProblemSpace const &problem_space,
708
+ ProblemSpace::Problem const &problem) {
709
+
710
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
711
+
712
+ // Initialize structure containing RankK arguments
713
+ rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
714
+ rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
715
+ rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
716
+ rank_k_workspace_.arguments.alpha = problem_.alpha.data();
717
+ rank_k_workspace_.arguments.beta = problem_.beta.data();
718
+ rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
719
+
720
+ results_.back().status = profile_cutlass_(
721
+ results_.back().runtime,
722
+ options,
723
+ operation,
724
+ &rank_k_workspace_.arguments,
725
+ rank_k_workspace_.host_workspace.data(),
726
+ rank_k_workspace_.device_workspace.data()
727
+ );
728
+ }
729
+ return true;
730
+ }
731
+
732
+ /////////////////////////////////////////////////////////////////////////////////////////////////
733
+
734
+ } // namespace profiler
735
+ } // namespace cutlass
736
+
737
+ /////////////////////////////////////////////////////////////////////////////////////////////////
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/sparse_gemm_operation_profiler.cu ADDED
@@ -0,0 +1,598 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+
34
+ */
35
+
36
+ #include <iostream>
37
+ #include <stdexcept>
38
+ #include <iomanip>
39
+ #include <ios>
40
+
41
+ #include "cutlass/profiler/cublas_helpers.h"
42
+ #include "cutlass/profiler/sparse_gemm_operation_profiler.h"
43
+ #include "cutlass/profiler/gpu_timer.h"
44
+
45
+ /////////////////////////////////////////////////////////////////////////////////////////////////
46
+
47
+ namespace cutlass {
48
+ namespace profiler {
49
+
50
+
51
+ /////////////////////////////////////////////////////////////////////////////////////////////////
52
+
53
+ /// Ctor
54
+ SparseGemmOperationProfiler::SparseGemmOperationProfiler(Options const &options):
55
+ OperationProfiler(
56
+ options,
57
+ library::OperationKind::kSparseGemm,
58
+ {
59
+ {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (e.g. sparse, ...)"},
60
+ {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"},
61
+ {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"},
62
+ {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"},
63
+ {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
64
+ {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
65
+ {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
66
+ {ArgumentTypeID::kTensor, {"E"}, "Tensor storing the E operand"},
67
+ {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
68
+ {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
69
+ {ArgumentTypeID::kInteger, {"split_k_slices"}, "Number of partitions of K dimension"},
70
+ {ArgumentTypeID::kInteger, {"batch_count"}, "Number of GEMMs computed in one batch"},
71
+ }
72
+ ) {
73
+
74
+ description_ = " Structured sparse GEMM. D = alpha * A*B + beta * C";
75
+ }
76
+
77
+ /// Destructor
78
+ SparseGemmOperationProfiler::~SparseGemmOperationProfiler() {
79
+
80
+ }
81
+
82
+ /// Prints usage statement for the math function
83
+ void SparseGemmOperationProfiler::print_usage(std::ostream &out) const {
84
+ out << "Sparse GEMM" << "\n\n";
85
+
86
+ OperationProfiler::print_usage(out);
87
+ }
88
+
89
+ /// Prints examples
90
+ void SparseGemmOperationProfiler::print_examples(std::ostream &out) const {
91
+
92
+ out << "\nExamples:\n\n"
93
+ << "Profile a particular problem size:\n"
94
+ << " $ cutlass_profiler --operation=SparseGemm --m=1024 --n=1024 --k=128\n\n"
95
+
96
+ << "Schmoo over problem size and beta:\n"
97
+ << " $ cutlass_profiler --operation=SparseGemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
98
+
99
+ << "Schmoo over accumulator types:\n"
100
+ << " $ cutlass_profiler --operation=SparseGemm --accumulator-type=f16,f32\n\n"
101
+
102
+ << "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
103
+ << "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. For row major, use row or t):\n"
104
+
105
+ << "Using various input value distribution:\n"
106
+ << " $ cutlass_profiler --operation=SparseGemm --dist=uniform,min:0,max:3\n"
107
+ << " $ cutlass_profiler --operation=SparseGemm --dist=gaussian,mean:0,stddev:3\n"
108
+ << " $ cutlass_profiler --operation=SparseGemm --dist=sequential,start:0,delta:1\n\n"
109
+
110
+ << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
111
+ << " $ cutlass_profiler --operation=SparseGemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
112
+
113
+ << "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n"
114
+ << " $ cutlass_profiler --operation=SparseGemm \\ \n"
115
+ << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
116
+ << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
117
+ << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
118
+ << " --beta=0,1,2 --profiling-iterations=1 \\ \n"
119
+ << " --providers=cutlass --output=functional-test.csv\n\n";
120
+ }
121
+
122
+ /////////////////////////////////////////////////////////////////////////////////////////////////
123
+
124
+ Status SparseGemmOperationProfiler::SparseGemmProblem::parse(
125
+ library::SparseGemmDescription const &operation_desc,
126
+ ProblemSpace const &problem_space,
127
+ ProblemSpace::Problem const &problem) {
128
+
129
+ if (!arg_as_int(this->m, "m", problem_space, problem)) {
130
+ // default value
131
+ this->m = 1024;
132
+ }
133
+
134
+ if (!arg_as_int(this->n, "n", problem_space, problem)) {
135
+ // default value
136
+ this->n = 1024;
137
+ }
138
+
139
+ if (!arg_as_int(this->k, "k", problem_space, problem)) {
140
+ // default value
141
+ this->k = 1024;
142
+ }
143
+
144
+ if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
145
+ // default value
146
+ this->split_k_slices = 1;
147
+ }
148
+
149
+ if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
150
+ // default value
151
+ this->batch_count = 1;
152
+ }
153
+
154
+ if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
155
+ return Status::kErrorInvalidProblem;
156
+ }
157
+
158
+ if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
159
+ return Status::kErrorInvalidProblem;
160
+ }
161
+
162
+ if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
163
+ return Status::kErrorInvalidProblem;
164
+ }
165
+
166
+ if (!tensor_description_satisfies(operation_desc.E, "E", problem_space, problem)) {
167
+ return Status::kErrorInvalidProblem;
168
+ }
169
+
170
+ if (!arg_as_scalar(
171
+ this->alpha,
172
+ operation_desc.element_epilogue,
173
+ "alpha",
174
+ problem_space,
175
+ problem)) {
176
+
177
+ if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
178
+ return Status::kErrorInternal;
179
+ }
180
+ }
181
+
182
+ if (!arg_as_scalar(
183
+ this->beta,
184
+ operation_desc.element_epilogue,
185
+ "beta",
186
+ problem_space,
187
+ problem)) {
188
+
189
+ if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
190
+ return Status::kErrorInternal;
191
+ }
192
+ }
193
+
194
+ this->elements_per_128b =
195
+ 128 / library::sizeof_bits(operation_desc.A.element);
196
+
197
+ this->lda = DeviceAllocation::get_packed_layout(
198
+ operation_desc.A.layout,
199
+ {int(this->m), int(this->k) / int(this->sparse)})
200
+ .front();
201
+
202
+ this->ldb = DeviceAllocation::get_packed_layout(
203
+ operation_desc.B.layout, {int(this->k), int(this->n)}).front();
204
+
205
+ this->ldc = DeviceAllocation::get_packed_layout(
206
+ operation_desc.C.layout, {int(this->m), int(this->n)}).front();
207
+
208
+ this->lde =
209
+ DeviceAllocation::get_packed_layout(
210
+ operation_desc.E.layout,
211
+ {int(this->m), int(this->k / this->sparse / this->elements_per_128b)})
212
+ .front();
213
+
214
+ return Status::kSuccess;
215
+ }
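The extents above reflect 2:4 structured sparsity: only k / sparse columns of A are stored, and the metadata tensor E has k / sparse / elements_per_128b columns, where elements_per_128b is how many operand elements share one 128-bit metadata word. A worked example, assuming an f16 A operand (16-bit elements) purely for illustration:

// Illustrative extents for a structured-sparse GEMM problem (f16 A assumed).
#include <cstdio>

int main() {
  int m = 1024, k = 4096;
  int sparse = 2;                                 // 2:4 sparsity stores half of K
  int sizeof_bits_A = 16;                         // f16 (assumption for this example)
  int elements_per_128b = 128 / sizeof_bits_A;    // 8 elements per 128-bit metadata word
  std::printf("A extent: %d x %d\n", m, k / sparse);                        // 1024 x 2048
  std::printf("E extent: %d x %d\n", m, k / sparse / elements_per_128b);    // 1024 x 256
  return 0;
}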
216
+
217
+ /// Initializes a performance result
218
+ void SparseGemmOperationProfiler::SparseGemmProblem::initialize_result(
219
+ PerformanceResult &result,
220
+ library::SparseGemmDescription const &operation_desc,
221
+ ProblemSpace const &problem_space) {
222
+
223
+ result.arguments.resize(problem_space.rank());
224
+
225
+ set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind));
226
+
227
+ set_argument(result, "A", problem_space,
228
+ std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
229
+
230
+ set_argument(result, "B", problem_space,
231
+ std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
232
+
233
+ set_argument(result, "C", problem_space,
234
+ std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
235
+
236
+ set_argument(result, "E", problem_space,
237
+ std::string(library::to_string(operation_desc.E.element)) + ":" + library::to_string(operation_desc.E.layout));
238
+
239
+ set_argument(result, "m", problem_space, m);
240
+ set_argument(result, "n", problem_space, n);
241
+ set_argument(result, "k", problem_space, k);
242
+
243
+ set_argument(result, "split_k_slices", problem_space, split_k_slices);
244
+ set_argument(result, "batch_count", problem_space, batch_count);
245
+
246
+ set_argument(result, "alpha", problem_space,
247
+ library::lexical_cast(alpha, operation_desc.element_epilogue));
248
+
249
+ set_argument(result, "beta", problem_space,
250
+ library::lexical_cast(beta, operation_desc.element_epilogue));
251
+ }
252
+
253
+ /// Extracts the problem dimensions
254
+ Status SparseGemmOperationProfiler::initialize_configuration(
255
+ Options const &options,
256
+ PerformanceReport &report,
257
+ DeviceContext &device_context,
258
+ library::Operation const *operation,
259
+ ProblemSpace const &problem_space,
260
+ ProblemSpace::Problem const &problem) {
261
+
262
+ library::SparseGemmDescription const &operation_desc =
263
+ static_cast<library::SparseGemmDescription const &>(operation->description());
264
+
265
+ if (operation_desc.gemm_kind != library::GemmKind::kSparse) {
266
+ return Status::kErrorInvalidProblem;
267
+ }
268
+
269
+ Status status = problem_.parse(operation_desc, problem_space, problem);
270
+
271
+ if (status != Status::kSuccess) {
272
+ return status;
273
+ }
274
+
275
+ gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
276
+ gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
277
+ gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
278
+ gemm_workspace_.configuration.lda = problem_.lda;
279
+ gemm_workspace_.configuration.ldb = problem_.ldb;
280
+ gemm_workspace_.configuration.ldc = problem_.ldc;
281
+ gemm_workspace_.configuration.ldd = problem_.ldc;
282
+ gemm_workspace_.configuration.lde = problem_.lde;
283
+
284
+ gemm_workspace_.arguments.A = nullptr;
285
+ gemm_workspace_.arguments.B = nullptr;
286
+ gemm_workspace_.arguments.C = nullptr;
287
+ gemm_workspace_.arguments.D = nullptr;
288
+ gemm_workspace_.arguments.E = nullptr;
289
+ gemm_workspace_.arguments.alpha = problem_.alpha.data();
290
+ gemm_workspace_.arguments.beta = problem_.beta.data();
291
+ gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
292
+
293
+ initialize_result_(this->model_result_, options, operation_desc, problem_space);
294
+
295
+ return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments);
296
+ }
297
+
298
+ /// Initializes the performance result
299
+ void SparseGemmOperationProfiler::initialize_result_(
300
+ PerformanceResult &result,
301
+ Options const &options,
302
+ library::SparseGemmDescription const &operation_desc,
303
+ ProblemSpace const &problem_space) {
304
+
305
+ result.provider = library::Provider::kCUTLASS;
306
+ result.disposition = Disposition::kNotRun;
307
+ result.status = Status::kSuccess;
308
+ result.operation_name = operation_desc.name;
309
+
310
+ problem_.initialize_result(result, operation_desc, problem_space);
311
+
312
+ OperationProfiler::initialize_result_(result, operation_desc, problem_space);
313
+
314
+ // Input bytes read and Output bytes written for the gemm problem
315
+ result.bytes =
316
+ int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) *
317
+ problem_.k / problem_.sparse +
318
+ int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) *
319
+ problem_.k +
320
+ int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) *
321
+ problem_.n +
322
+ int64_t(library::sizeof_bits(operation_desc.E.element) * problem_.m / 8) *
323
+ problem_.k / problem_.sparse / problem_.elements_per_128b;
324
+
325
+ // Set is_beta_zero true if beta is zero
326
+ bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i==0; });
327
+
328
+ // Output bytes read for the gemm problem for non-zero beta values
329
+ if (!is_beta_zero) {
330
+ result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n;
331
+ }
332
+
333
+ result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n);
334
+ result.runtime = 0;
335
+
336
+ }
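To make the traffic model above concrete: A contributes m * k / sparse elements, B contributes n * k, the output is written once as D and C is read once more when beta is non-zero, and E contributes the packed metadata. A small sketch with element widths chosen only for illustration (f16 operands, 16-bit metadata):

// Illustrative byte count following the sparse GEMM traffic model above.
#include <cstdint>
#include <cstdio>

int main() {
  int64_t m = 1024, n = 1024, k = 4096, sparse = 2, elements_per_128b = 8;
  int64_t bits_A = 16, bits_B = 16, bits_C = 16, bits_E = 16;         // assumed widths
  int64_t bytes = (bits_A * m / 8) * k / sparse                       // A (compressed K)
                + (bits_B * n / 8) * k                                // B
                + (bits_C * m / 8) * n                                // D written
                + (bits_E * m / 8) * k / sparse / elements_per_128b;  // metadata E
  bool beta_is_zero = false;
  if (!beta_is_zero) {
    bytes += (bits_C * m / 8) * n;                                    // C read when beta != 0
  }
  std::printf("bytes: %lld\n", static_cast<long long>(bytes));
  return 0;
}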
337
+
338
+ /// Initializes workspace
339
+ Status SparseGemmOperationProfiler::initialize_workspace(
340
+ Options const &options,
341
+ PerformanceReport &report,
342
+ DeviceContext &device_context,
343
+ library::Operation const *operation,
344
+ ProblemSpace const &problem_space,
345
+ ProblemSpace::Problem const &problem) {
346
+
347
+ if (options.device.devices.size() != 1) {
348
+ throw std::runtime_error("This operation profiler only supports a single "
349
+ "device.");
350
+ }
351
+
352
+ cudaError_t result;
353
+ result = cudaSetDevice(options.device.device_id(0));
354
+ if (result != cudaSuccess) {
355
+ throw std::runtime_error("cudaSetDevice() failed.");
356
+ }
357
+
358
+ library::SparseGemmDescription const &operation_desc =
359
+ static_cast<library::SparseGemmDescription const &>(operation->description());
360
+
361
+ if (options.execution_mode != ExecutionMode::kDryRun) {
362
+ int seed_shift = 0;
363
+ gemm_workspace_.A = device_context.allocate_and_initialize_tensor(
364
+ options,
365
+ "A",
366
+ operation_desc.A.element,
367
+ operation_desc.A.layout,
368
+ {int(problem_.m), int(problem_.k) / int(problem_.sparse)},
369
+ {int(problem_.lda)},
370
+ 1, // batch_count
371
+ seed_shift++,
372
+ 0 // device_index
373
+ );
374
+
375
+ gemm_workspace_.B = device_context.allocate_and_initialize_tensor(
376
+ options,
377
+ "B",
378
+ operation_desc.B.element,
379
+ operation_desc.B.layout,
380
+ {int(problem_.k), int(problem_.n)},
381
+ {int(problem_.ldb)},
382
+ 1, // batch_count
383
+ seed_shift++,
384
+ 0 // device_index
385
+ );
386
+
387
+ gemm_workspace_.C = device_context.allocate_and_initialize_tensor(
388
+ options,
389
+ "C",
390
+ operation_desc.C.element,
391
+ operation_desc.C.layout,
392
+ {int(problem_.m), int(problem_.n)},
393
+ {int(problem_.ldc)},
394
+ 1, // batch_count
395
+ seed_shift++,
396
+ 0 // device_index
397
+ );
398
+
399
+ gemm_workspace_.Computed = device_context.allocate_tensor(
400
+ options,
401
+ "D",
402
+ operation_desc.C.element,
403
+ operation_desc.C.layout,
404
+ {int(problem_.m), int(problem_.n)},
405
+ {int(problem_.ldc)},
406
+ 1, // batch_count
407
+ 0 // device_index
408
+ );
409
+
410
+ gemm_workspace_.E = device_context.allocate_and_initialize_sparsemeta_tensor(
411
+ options,
412
+ "E",
413
+ operation_desc.E.element,
414
+ operation_desc.E.layout,
415
+ operation_desc.A.element,
416
+ {int(problem_.m), int(problem_.k) / int(problem_.sparse) / int(problem_.elements_per_128b)},
417
+ {int(problem_.lde)},
418
+ 1, // batch_count
419
+ seed_shift++,
420
+ 0 // device_index
421
+ );
422
+
423
+ gemm_workspace_.Reference = device_context.allocate_tensor(
424
+ options,
425
+ "Reference",
426
+ operation_desc.C.element,
427
+ operation_desc.C.layout,
428
+ {int(problem_.m), int(problem_.n)},
429
+ {int(problem_.ldc)},
430
+ 1, // batch_count
431
+ 0 // device_index
432
+ );
433
+
434
+ gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data());
435
+ }
436
+
437
+ //
438
+ // Initialize the CUTLASS operation
439
+ //
440
+
441
+ Status status = Status::kSuccess;
442
+
443
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
444
+
445
+ if (options.execution_mode != ExecutionMode::kDryRun) {
446
+
447
+ uint64_t workspace_size = operation->get_host_workspace_size(&gemm_workspace_.configuration);
448
+ gemm_workspace_.host_workspace.resize(workspace_size, 0);
449
+
450
+ workspace_size = operation->get_device_workspace_size(&gemm_workspace_.configuration);
451
+ gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
452
+
453
+ status = operation->initialize(
454
+ &gemm_workspace_.configuration,
455
+ gemm_workspace_.host_workspace.data(),
456
+ gemm_workspace_.device_workspace.data());
457
+ }
458
+
459
+ //
460
+ // If CUTLASS is enabled, generate a result for it
461
+ //
462
+
463
+ results_.push_back(model_result_);
464
+ results_.back().provider = library::Provider::kCUTLASS;
465
+ results_.back().op_kind = library::OperationKind::kSparseGemm;
466
+ results_.back().disposition = Disposition::kNotRun;
467
+
468
+ for(auto &verification_provider : options.verification.providers) {
469
+ results_.back().verification_map[verification_provider] = Disposition::kNotRun;
470
+ }
471
+ }
472
+
473
+ return status;
474
+ }
475
+
476
+ /////////////////////////////////////////////////////////////////////////////////////////////////
477
+
478
+ /// Verifies CUTLASS against references
479
+ bool SparseGemmOperationProfiler::verify_cutlass(
480
+ Options const &options,
481
+ PerformanceReport &report,
482
+ DeviceContext &device_context,
483
+ library::Operation const *operation,
484
+ ProblemSpace const &problem_space,
485
+ ProblemSpace::Problem const &problem) {
486
+
487
+ if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
488
+ return true;
489
+ }
490
+
491
+ if (options.execution_mode == ExecutionMode::kDryRun) {
492
+ return true;
493
+ }
494
+
495
+ // Initialize structure containing GEMM arguments
496
+ gemm_workspace_.arguments.A = gemm_workspace_.A->data();
497
+ gemm_workspace_.arguments.B = gemm_workspace_.B->data();
498
+ gemm_workspace_.arguments.C = gemm_workspace_.C->data();
499
+ gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
500
+ gemm_workspace_.arguments.E = gemm_workspace_.E->data();
501
+ gemm_workspace_.arguments.alpha = problem_.alpha.data();
502
+ gemm_workspace_.arguments.beta = problem_.beta.data();
503
+ gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
504
+
505
+ //
506
+ // Run the CUTLASS operation
507
+ //
508
+
509
+ results_.back().status = operation->run(
510
+ &gemm_workspace_.arguments,
511
+ gemm_workspace_.host_workspace.data(),
512
+ gemm_workspace_.device_workspace.data());
513
+
514
+ if (results_.back().status != Status::kSuccess) {
515
+ results_.back().disposition = Disposition::kFailed;
516
+ return false;
517
+ }
518
+
519
+ cudaError_t result = cudaDeviceSynchronize();
520
+ if (result != cudaSuccess) {
521
+ results_.back().disposition = Disposition::kFailed;
522
+ return false;
523
+ }
524
+
525
+ // CUTLASS op ran but has not yet been verified against any verification provider
526
+ results_.back().disposition = Disposition::kNotVerified;
527
+
528
+ //
529
+ // Run verification providers
530
+ //
531
+
532
+ if (options.verification.enabled) {
533
+
534
+ // Update disposition to worst case verification outcome among all
535
+ // verification providers which are supported
536
+ bool is_any_verification_run_passed = false;
537
+
538
+ for(auto &m : results_.back().verification_map) {
539
+ if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
540
+ results_.back().disposition = m.second;
541
+ return true;
542
+ }
543
+ if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
544
+ is_any_verification_run_passed = true;
545
+ }
546
+ }
547
+
548
+ if(is_any_verification_run_passed) {
549
+ results_.back().disposition = Disposition::kPassed;
550
+ }
551
+ }
552
+
553
+ // Returning true means profiling should continue
554
+ return true;
555
+ }
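The loop above reduces the per-provider outcomes to a single disposition: any kFailed or kIncorrect result is reported immediately, and otherwise a single kPassed upgrades the result from kNotVerified to kPassed. A compact restatement of that policy (the enum and map key here are stand-ins, not the profiler's types):

// Worst-case reduction over per-provider verification outcomes (sketch).
#include <map>
#include <string>

enum class Outcome { kNotVerified, kNotRun, kPassed, kIncorrect, kFailed };

Outcome reduce(std::map<std::string, Outcome> const &per_provider) {
  bool any_passed = false;
  for (auto const &kv : per_provider) {
    if (kv.second == Outcome::kFailed || kv.second == Outcome::kIncorrect) {
      return kv.second;                      // failures dominate every other outcome
    }
    any_passed |= (kv.second == Outcome::kPassed);
  }
  return any_passed ? Outcome::kPassed : Outcome::kNotVerified;
}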
556
+
557
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
558
+
559
+ /// Measures performance results
560
+ bool SparseGemmOperationProfiler::profile(
561
+ Options const &options,
562
+ PerformanceReport &report,
563
+ DeviceContext &device_context,
564
+ library::Operation const *operation,
565
+ ProblemSpace const &problem_space,
566
+ ProblemSpace::Problem const &problem) {
567
+
568
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
569
+
570
+ // Initialize structure containing GEMM arguments
571
+ gemm_workspace_.arguments.A = gemm_workspace_.A->data();
572
+ gemm_workspace_.arguments.B = gemm_workspace_.B->data();
573
+ gemm_workspace_.arguments.C = gemm_workspace_.C->data();
574
+ gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
575
+ gemm_workspace_.arguments.E = gemm_workspace_.E->data();
576
+ gemm_workspace_.arguments.alpha = problem_.alpha.data();
577
+ gemm_workspace_.arguments.beta = problem_.beta.data();
578
+ gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
579
+
580
+ results_.back().status = profile_cutlass_(
581
+ results_.back().runtime,
582
+ options,
583
+ operation,
584
+ &gemm_workspace_.arguments,
585
+ gemm_workspace_.host_workspace.data(),
586
+ gemm_workspace_.device_workspace.data()
587
+ );
588
+ }
589
+
590
+ return true;
591
+ }
592
+
593
+ /////////////////////////////////////////////////////////////////////////////////////////////////
594
+
595
+ } // namespace profiler
596
+ } // namespace cutlass
597
+
598
+ /////////////////////////////////////////////////////////////////////////////////////////////////
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/symm_operation_profiler.cu ADDED
@@ -0,0 +1,790 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+
34
+
35
+ */
36
+
37
+ #include <iostream>
38
+ #include <stdexcept>
39
+ #include <iomanip>
40
+ #include <ios>
41
+
42
+ #include "cutlass/core_io.h"
43
+
44
+ #include "cutlass/profiler/cublas_helpers.h"
45
+ #include "cutlass/profiler/symm_operation_profiler.h"
46
+ #include "cutlass/profiler/gpu_timer.h"
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace cutlass {
51
+ namespace profiler {
52
+
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ /// Ctor
57
+ SymmOperationProfiler::SymmOperationProfiler(Options const &options):
58
+ OperationProfiler(
59
+ options,
60
+ library::OperationKind::kSymm,
61
+ {
62
+ {ArgumentTypeID::kEnumerated, {"symm_kind"}, "Variant of Symm (universal)"},
63
+ {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the Symm problem space"},
64
+ {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the Symm problem space"},
65
+ {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
66
+ {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
67
+ {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
68
+ {ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for Symm kernel (left or right)"},
69
+ {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for Symm kernel (lower or upper)"},
70
+ {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for Symm kernel (symmetric or hermitian)"},
71
+ {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
72
+ {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
73
+ {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
74
+ {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of Symm operations computed in one batch"},
75
+ },
76
+ { library::Provider::kCUBLAS }
77
+ ) {
78
+ description_ = " Symmetric Matrix-Matrix Multiplication. D = alpha * A * B + beta * C (left side mode) OR alpha * B * A + beta * C (right side mode), where A is symmetric/hermitian";
79
+ }
80
+
81
+ /// Destructor
82
+ SymmOperationProfiler::~SymmOperationProfiler() {
83
+
84
+ }
85
+
86
+ /// Prints usage statement for the math function
87
+ void SymmOperationProfiler::print_usage(std::ostream &out) const {
88
+ out << "Symm" << "\n\n";
89
+
90
+ OperationProfiler::print_usage(out);
91
+ }
92
+
93
+ /// Prints examples
94
+ void SymmOperationProfiler::print_examples(std::ostream &out) const {
95
+
96
+ out << "\nExamples:\n\n"
97
+ << "Profile a particular problem size SYMM kernel:\n"
98
+ << " $ cutlass_profiler --operation=Symm --blas_mode=symmetric --m=1024 --n=128\n\n"
99
+
100
+ << "Profile a particular problem size HEMM kernel:\n"
101
+ << " $ cutlass_profiler --operation=Symm --blas_mode=hermitian --m=1024 --n=128\n\n"
102
+
103
+ << "Schmoo over problem size and beta:\n"
104
+ << " $ cutlass_profiler --operation=Symm --m=1024:4096:256 --n=128:8192:128 --beta=0,1,2.5\n\n"
105
+
106
+ << "Schmoo over accumulator types:\n"
107
+ << " $ cutlass_profiler --operation=Symm --accumulator-type=f16,f32\n\n"
108
+
109
+ << "Schmoo over side modes:\n"
110
+ << " $ cutlass_profiler --operation=Symm --side_mode=left/right\n\n"
111
+
112
+ << "Schmoo over fill modes:\n"
113
+ << " $ cutlass_profiler --operation=Symm --fill_mode=lower/upper\n\n"
114
+
115
+ << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major, use row or t):\n"
116
+ << " $ cutlass_profiler --operation=Symm --A=f16:column or --A=*:row\n\n"
117
+
118
+ << "Using various input value distribution:\n"
119
+ << " $ cutlass_profiler --operation=Symm --dist=uniform,min:0,max:3\n"
120
+ << " $ cutlass_profiler --operation=Symm --dist=gaussian,mean:0,stddev:3\n"
121
+ << " $ cutlass_profiler --operation=Symm --dist=sequential,start:0,delta:1\n\n"
122
+
123
+ << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
124
+ << " $ cutlass_profiler --operation=Symm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
125
+
126
+ << "Test your changes to symm kernels with a quick functional test and save results in functional-test.csv:\n"
127
+ << " $ cutlass_profiler --operation=Symm \\ \n"
128
+ << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
129
+ << " --n=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
130
+ << " --beta=0,1,2 --profiling-iterations=1 \\ \n"
131
+ << " --providers=cutlass --output=functional-test.csv\n\n";
132
+ }
133
+
134
+ /////////////////////////////////////////////////////////////////////////////////////////////////
135
+
136
+ #if 0
137
+ // used this for debugging
138
+ static std::string byte_string(std::vector<uint8_t> const &bytes) {
139
+ std::stringstream ss;
140
+
141
+ ss << "0x";
142
+
143
+ for (size_t idx = bytes.size(); idx > 0; --idx) {
144
+ ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
145
+ }
146
+
147
+ return ss.str();
148
+ }
149
+ #endif
150
+
151
+ Status SymmOperationProfiler::SymmProblem::parse(
152
+ library::SymmDescription const &operation_desc,
153
+ ProblemSpace const &problem_space,
154
+ ProblemSpace::Problem const &problem) {
155
+
156
+ if (!arg_as_int(this->m, "m", problem_space, problem)) {
157
+ // default value
158
+ this->m = 1024;
159
+ }
160
+
161
+ if (!arg_as_int(this->n, "n", problem_space, problem)) {
162
+ // default value
163
+ this->n = 1024;
164
+ }
165
+
166
+ if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
167
+ // default value
168
+ this->split_k_slices = 1;
169
+ }
170
+
171
+ if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
172
+ // default value
173
+ this->batch_count = 1;
174
+ }
175
+
176
+ if (this->split_k_slices > 1 && this->batch_count > 1) {
177
+ // At least one of these must be one
178
+ return Status::kErrorInvalidProblem;
179
+ }
180
+
181
+ if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
182
+ return Status::kErrorInvalidProblem;
183
+ }
184
+
185
+ if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
186
+ return Status::kErrorInvalidProblem;
187
+ }
188
+
189
+ if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
190
+ return Status::kErrorInvalidProblem;
191
+ }
192
+
193
+ if (!arg_as_scalar(
194
+ this->alpha,
195
+ operation_desc.element_epilogue,
196
+ "alpha",
197
+ problem_space,
198
+ problem)) {
199
+
200
+ if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
201
+ return Status::kErrorInternal;
202
+ }
203
+ }
204
+
205
+ if (!arg_as_scalar(
206
+ this->beta,
207
+ operation_desc.element_epilogue,
208
+ "beta",
209
+ problem_space,
210
+ problem)) {
211
+
212
+ if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
213
+ return Status::kErrorInternal;
214
+ }
215
+ }
216
+
217
+ if (operation_desc.side_mode == SideMode::kLeft) {
218
+ this->lda = DeviceAllocation::get_packed_layout(
219
+ operation_desc.A.layout, {int(this->m), int(this->m)}).front();
220
+ }
221
+ else if (operation_desc.side_mode == SideMode::kRight) {
222
+ this->lda = DeviceAllocation::get_packed_layout(
223
+ operation_desc.A.layout, {int(this->n), int(this->n)}).front();
224
+ }
225
+
226
+ this->ldb = DeviceAllocation::get_packed_layout(
227
+ operation_desc.B.layout, {int(this->m), int(this->n)}).front();
228
+
229
+ this->ldc = DeviceAllocation::get_packed_layout(
230
+ operation_desc.C.layout, {int(this->m), int(this->n)}).front();
231
+
232
+ return Status::kSuccess;
233
+ }
234
+
235
+ /// Total number of bytes loaded
236
+ int64_t SymmOperationProfiler::SymmProblem::bytes(library::SymmDescription const &operation_desc) const {
237
+ int64_t bytes = 0;
238
+ // Input bytes read and Output bytes written for the gemm problem
239
+ // Half matrix including the diagonal will have (X*(X+1))/2 elements
240
+ if (operation_desc.side_mode == SideMode::kLeft) {
241
+ bytes =
242
+ int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) * (m + 1) / 2 +
243
+ int64_t(library::sizeof_bits(operation_desc.B.element) * m / 8) * n +
244
+ int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
245
+ } else if (operation_desc.side_mode == SideMode::kRight) {
246
+ bytes =
247
+ int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * (n + 1) / 2 +
248
+ int64_t(library::sizeof_bits(operation_desc.B.element) * m / 8) * n +
249
+ int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
250
+ }
251
+ // Set is_beta_zero true if beta is zero
252
+ bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
253
+
254
+ // Output bytes read for the gemm problem for non-zero beta values
255
+ if (!is_beta_zero) {
256
+ bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
257
+ }
258
+
259
+ bytes *= batch_count;
260
+
261
+ return bytes;
262
+ }
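The (X * (X + 1)) / 2 factor above is simply the number of stored elements in one triangle of the symmetric operand, diagonal included; with left side mode the A term therefore charges m * (m + 1) / 2 elements rather than m * m. A tiny illustration:

// Unique elements in one triangle (including the diagonal) of an X-by-X symmetric matrix.
#include <cstdint>
#include <cstdio>

int main() {
  int64_t X = 4;
  int64_t triangle = X * (X + 1) / 2;   // 10 of the 16 entries of a 4x4 matrix
  std::printf("%lld unique elements\n", static_cast<long long>(triangle));
  return 0;
}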
263
+
264
+ /// Total number of flops computed
265
+ int64_t SymmOperationProfiler::SymmProblem::flops(library::SymmDescription const &operation_desc) const {
266
+
267
+ // FLOPs for first TRMM kernel (with diagonal) = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero
268
+ // FLOPs for second TRMM kernel (with diagonal) = 2 * [ ( M * (M-1)/2 * N ) ] // Beta is zero
269
+ // FLOPs = m*(m+1)*n [mma1] + m*(m-1)*n [mma2] + 2*m*n [epilogue]
270
+ // FLOPs = 2*m*n*(m+1) for left side mode
271
+ // FLOPs can also be calculated to be same as GEMM with correct value for 'k' as below.
272
+ int64_t k = (operation_desc.side_mode == SideMode::kLeft) ? int64_t(m) : int64_t(n);
273
+ int64_t flops_ = (int64_t(m) * n * k + m * n) * 2;
274
+
275
+ // complex-valued support
276
+ switch (operation_desc.tile_description.math_instruction.math_operation) {
277
+ case library::MathOperationID::kMultiplyAddComplex:
278
+ flops_ *= 4;
279
+ break;
280
+
281
+ case library::MathOperationID::kMultiplyAddComplexFastF32:
282
+ flops_ *= 4;
283
+ break;
284
+
285
+ case library::MathOperationID::kMultiplyAddGaussianComplex:
286
+ flops_ *= 3;
287
+ break;
288
+
289
+ default: break;
290
+ }
291
+
292
+ return flops_;
293
+ }
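Put differently, the Symm FLOP count is that of a GEMM whose k equals m (left side mode) or n (right side mode), scaled by 4 for complex and by 3 for Gaussian complex math instructions. For example, real-valued left side mode with m = 1024 and n = 512 gives 2 * (1024 * 512 * 1024 + 1024 * 512) = 1,074,790,400 FLOPs, roughly 1.07 GFLOP.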
294
+
295
+ /// Initializes a performance result
296
+ void SymmOperationProfiler::SymmProblem::initialize_result(
297
+ PerformanceResult &result,
298
+ library::SymmDescription const &operation_desc,
299
+ ProblemSpace const &problem_space) {
300
+
301
+ result.arguments.resize(problem_space.rank());
302
+
303
+ set_argument(result, "symm_kind", problem_space, library::to_string(operation_desc.symm_kind));
304
+
305
+ set_argument(result, "A", problem_space,
306
+ std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
307
+
308
+ set_argument(result, "B", problem_space,
309
+ std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
310
+
311
+ set_argument(result, "C", problem_space,
312
+ std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
313
+
314
+ set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode));
315
+
316
+ set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
317
+
318
+ set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
319
+
320
+ set_argument(result, "m", problem_space, m);
321
+ set_argument(result, "n", problem_space, n);
322
+
323
+ set_argument(result, "split_k_slices", problem_space, split_k_slices);
324
+ set_argument(result, "batch_count", problem_space, batch_count);
325
+
326
+ set_argument(result, "alpha", problem_space,
327
+ library::lexical_cast(alpha, operation_desc.element_epilogue));
328
+
329
+ set_argument(result, "beta", problem_space,
330
+ library::lexical_cast(beta, operation_desc.element_epilogue));
331
+ }
332
+
333
+ /////////////////////////////////////////////////////////////////////////////////////////////////
334
+
335
+ /// Extracts the problem dimensions
336
+ Status SymmOperationProfiler::initialize_configuration(
337
+ Options const &options,
338
+ PerformanceReport &report,
339
+ DeviceContext &device_context,
340
+ library::Operation const *operation,
341
+ ProblemSpace const &problem_space,
342
+ ProblemSpace::Problem const &problem) {
343
+
344
+ library::SymmDescription const &operation_desc =
345
+ static_cast<library::SymmDescription const &>(operation->description());
346
+
347
+ if (operation_desc.symm_kind != library::SymmKind::kUniversal) {
348
+ return Status::kErrorInvalidProblem;
349
+ }
350
+
351
+ Status status = problem_.parse(operation_desc, problem_space, problem);
352
+
353
+ if (status != Status::kSuccess) {
354
+ return status;
355
+ }
356
+
357
+ symm_workspace_.configuration.problem_size.m() = int(problem_.m);
358
+ symm_workspace_.configuration.problem_size.n() = int(problem_.n);
359
+ symm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft)
360
+ ? int(problem_.m) : int(problem_.n);
361
+ symm_workspace_.configuration.lda = problem_.lda;
362
+ symm_workspace_.configuration.ldb = problem_.ldb;
363
+ symm_workspace_.configuration.ldc = problem_.ldc;
364
+ symm_workspace_.configuration.ldd = problem_.ldc;
365
+ //symm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
366
+ symm_workspace_.configuration.batch_count = int(problem_.split_k_slices);
367
+
368
+ symm_workspace_.arguments.A = nullptr;
369
+ symm_workspace_.arguments.B = nullptr;
370
+ symm_workspace_.arguments.C = nullptr;
371
+ symm_workspace_.arguments.D = nullptr;
372
+ symm_workspace_.arguments.alpha = problem_.alpha.data();
373
+ symm_workspace_.arguments.beta = problem_.beta.data();
374
+ symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
375
+
376
+ initialize_result_(this->model_result_, options, operation_desc, problem_space);
377
+
378
+ return operation->can_implement(&symm_workspace_.configuration, &symm_workspace_.arguments);
379
+ }
380
+
381
+ /// Initializes the performance result
382
+ void SymmOperationProfiler::initialize_result_(
383
+ PerformanceResult &result,
384
+ Options const &options,
385
+ library::SymmDescription const &operation_desc,
386
+ ProblemSpace const &problem_space) {
387
+
388
+ result.provider = library::Provider::kCUTLASS;
389
+ result.disposition = Disposition::kNotRun;
390
+ result.status = Status::kSuccess;
391
+ result.operation_name = operation_desc.name;
392
+
393
+ problem_.initialize_result(result, operation_desc, problem_space);
394
+
395
+ OperationProfiler::initialize_result_(result, operation_desc, problem_space);
396
+
397
+
398
+ result.bytes = problem_.bytes(operation_desc);
399
+ result.flops = problem_.flops(operation_desc);
400
+ result.runtime = 0;
401
+
402
+
403
+ }
404
+
405
+ /// Initializes workspace
406
+ Status SymmOperationProfiler::initialize_workspace(
407
+ Options const &options,
408
+ PerformanceReport &report,
409
+ DeviceContext &device_context,
410
+ library::Operation const *operation,
411
+ ProblemSpace const &problem_space,
412
+ ProblemSpace::Problem const &problem) {
413
+
414
+ if (options.device.devices.size() != 1) {
415
+ throw std::runtime_error("This operation profiler only supports a single "
416
+ "device.");
417
+ }
418
+
419
+ cudaError_t result;
420
+ result = cudaSetDevice(options.device.device_id(0));
421
+ if (result != cudaSuccess) {
422
+ throw std::runtime_error("cudaSetDevice() failed.");
423
+ }
424
+
425
+ library::SymmDescription const &operation_desc =
426
+ static_cast<library::SymmDescription const &>(operation->description());
427
+
428
+ if (options.execution_mode != ExecutionMode::kDryRun) {
429
+ int seed_shift = 0;
430
+ if (operation_desc.side_mode == SideMode::kLeft) {
431
+ symm_workspace_.A = device_context.allocate_and_initialize_tensor(
432
+ options,
433
+ "A",
434
+ operation_desc.A.element,
435
+ operation_desc.A.layout,
436
+ {int(problem_.m), int(problem_.m)},
437
+ {int(problem_.lda)},
438
+ 1, // batch_count
439
+ seed_shift++,
440
+ 0 // device_index
441
+ );
442
+ } else if (operation_desc.side_mode == SideMode::kRight) {
443
+ symm_workspace_.A = device_context.allocate_and_initialize_tensor(
444
+ options,
445
+ "A",
446
+ operation_desc.A.element,
447
+ operation_desc.A.layout,
448
+ {int(problem_.n), int(problem_.n)},
449
+ {int(problem_.lda)},
450
+ 1, // batch_count
451
+ seed_shift++,
452
+ 0 // device_index
453
+ );
454
+ }
455
+
456
+ symm_workspace_.B = device_context.allocate_and_initialize_tensor(
457
+ options,
458
+ "B",
459
+ operation_desc.B.element,
460
+ operation_desc.B.layout,
461
+ {int(problem_.m), int(problem_.n)},
462
+ {int(problem_.ldb)},
463
+ 1, // batch_count
464
+ seed_shift++,
465
+ 0 // device_index
466
+ );
467
+
468
+ symm_workspace_.C = device_context.allocate_and_initialize_tensor(
469
+ options,
470
+ "C",
471
+ operation_desc.C.element,
472
+ operation_desc.C.layout,
473
+ {int(problem_.m), int(problem_.n)},
474
+ {int(problem_.ldc)},
475
+ 1, // batch_count
476
+ seed_shift++,
477
+ 0 // device_index
478
+ );
479
+
480
+ symm_workspace_.Computed = device_context.allocate_tensor(
481
+ options,
482
+ "D",
483
+ operation_desc.C.element,
484
+ operation_desc.C.layout,
485
+ {int(problem_.m), int(problem_.n)},
486
+ {int(problem_.ldc)},
487
+ 1, // batch_count
488
+ 0 // device_index
489
+ );
490
+
491
+ symm_workspace_.Reference = device_context.allocate_tensor(
492
+ options,
493
+ "Reference",
494
+ operation_desc.C.element,
495
+ operation_desc.C.layout,
496
+ {int(problem_.m), int(problem_.n)},
497
+ {int(problem_.ldc)},
498
+ 1, // batch_count
499
+ 0 // device_index
500
+ );
501
+
502
+ symm_workspace_.Computed->copy_from_device(symm_workspace_.C->data());
503
+ symm_workspace_.Reference->copy_from_device(symm_workspace_.C->data());
504
+ }
505
+
506
+
507
+ //
508
+ // Initialize the CUTLASS operation
509
+ //
510
+ Status status = Status::kSuccess;
511
+
512
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
513
+
514
+ if (options.execution_mode != ExecutionMode::kDryRun) {
515
+
516
+ uint64_t workspace_size = operation->get_host_workspace_size(&symm_workspace_.configuration);
517
+ symm_workspace_.host_workspace.resize(workspace_size, 0);
518
+
519
+ workspace_size = operation->get_device_workspace_size(&symm_workspace_.configuration);
520
+ symm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
521
+
522
+ status = operation->initialize(
523
+ &symm_workspace_.configuration,
524
+ symm_workspace_.host_workspace.data(),
525
+ symm_workspace_.device_workspace.data());
526
+ }
527
+
528
+ //
529
+ // If CUTLASS is enabled, generate a result for it
530
+ //
531
+ results_.push_back(model_result_);
532
+ results_.back().provider = library::Provider::kCUTLASS;
533
+ results_.back().op_kind = library::OperationKind::kSymm;
534
+ results_.back().disposition = Disposition::kNotRun;
535
+
536
+ for(auto provider : verification_providers_) {
537
+ results_.back().verification_map[provider] = Disposition::kNotRun;
538
+ }
539
+ }
540
+
541
+ return status;
542
+ }
543
+
544
+ /////////////////////////////////////////////////////////////////////////////////////////////////
545
+
546
+ /// Verifies CUTLASS against references
547
+ bool SymmOperationProfiler::verify_cutlass(
548
+ Options const &options,
549
+ PerformanceReport &report,
550
+ DeviceContext &device_context,
551
+ library::Operation const *operation,
552
+ ProblemSpace const &problem_space,
553
+ ProblemSpace::Problem const &problem) {
554
+
555
+ if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
556
+ return true;
557
+ }
558
+
559
+ if (options.execution_mode == ExecutionMode::kDryRun) {
560
+ return true;
561
+ }
562
+
563
+ // Initialize structure containing Symm arguments
564
+ symm_workspace_.arguments.A = symm_workspace_.A->data();
565
+ symm_workspace_.arguments.B = symm_workspace_.B->data();
566
+ symm_workspace_.arguments.C = symm_workspace_.C->data();
567
+ symm_workspace_.arguments.D = symm_workspace_.Computed->data();
568
+ symm_workspace_.arguments.alpha = problem_.alpha.data();
569
+ symm_workspace_.arguments.beta = problem_.beta.data();
570
+ symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
571
+
572
+ //
573
+ // Run the CUTLASS operation
574
+ //
575
+
576
+ results_.back().status = operation->run(
577
+ &symm_workspace_.arguments,
578
+ symm_workspace_.host_workspace.data(),
579
+ symm_workspace_.device_workspace.data());
580
+
581
+ if (results_.back().status != Status::kSuccess) {
582
+ results_.back().disposition = Disposition::kFailed;
583
+ return false;
584
+ }
585
+
586
+ cudaError_t result = cudaDeviceSynchronize();
587
+ if (result != cudaSuccess) {
588
+ results_.back().disposition = Disposition::kFailed;
589
+ return false;
590
+ }
591
+
592
+  // CUTLASS op ran but has not yet been verified against any verification provider
593
+ results_.back().disposition = Disposition::kNotVerified;
594
+
595
+ //
596
+ // Run verification providers
597
+ //
598
+
599
+ if (options.verification.enabled) {
600
+
601
+ #if CUTLASS_ENABLE_CUBLAS
602
+ if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
603
+
604
+ // Guard against unsupported cases
605
+ auto const & symm_desc = static_cast<library::SymmDescription const &>(operation->description());
606
+
607
+ if (cublas_satisfies(symm_desc) == Status::kSuccess) {
608
+
609
+ // call cublas verification if supported
610
+ verify_with_cublas_(
611
+ options,
612
+ report,
613
+ device_context,
614
+ operation,
615
+ problem_space,
616
+ problem);
617
+ }
618
+
619
+ else {
620
+ // set verification map for cublas to not supported
621
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
622
+ }
623
+ }
624
+ #endif // #if CUTLASS_ENABLE_CUBLAS
625
+
626
+ // Update disposition to worst case verification outcome among all
627
+ // verification providers which are supported
628
+ bool is_any_verification_run_passed = false;
629
+ for(auto &m : results_.back().verification_map) {
630
+ if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
631
+ results_.back().disposition = m.second;
632
+ return true;
633
+ }
634
+ if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
635
+ is_any_verification_run_passed = true;
636
+ }
637
+ }
638
+
639
+ if(is_any_verification_run_passed) {
640
+ results_.back().disposition = Disposition::kPassed;
641
+ }
642
+ }
643
+
644
+ // Return true means continue profiling
645
+ return true;
646
+ }
647
+
648
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
649
+
650
+ /// Verifies CUTLASS against references
651
+ bool SymmOperationProfiler::verify_with_cublas_(
652
+ Options const &options,
653
+ PerformanceReport &report,
654
+ DeviceContext &device_context,
655
+ library::Operation const *operation,
656
+ ProblemSpace const &problem_space,
657
+ ProblemSpace::Problem const &problem) {
658
+
659
+
660
+ #if CUTLASS_ENABLE_CUBLAS
661
+
662
+ library::SymmDescription const &symm_desc =
663
+ static_cast<library::SymmDescription const &>(operation->description());
664
+
665
+ //
666
+ // Construct cuBLAS operators
667
+ //
668
+
669
+ CublasCreate handle;
670
+ cublasStatus_t status = handle.get_cublas_create_status();
671
+
672
+ if (status != CUBLAS_STATUS_SUCCESS) {
673
+
674
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
675
+ return true;
676
+ }
677
+
678
+ //
679
+ // Initialize state
680
+ //
681
+
682
+ try {
683
+
684
+ //
685
+ // Construct dispatcher to cublas<t>Symm()
686
+ //
687
+
688
+ // Initialize structure containing Symm arguments
689
+ symm_workspace_.arguments.A = symm_workspace_.A->data();
690
+ symm_workspace_.arguments.B = symm_workspace_.B->data();
691
+ symm_workspace_.arguments.C = symm_workspace_.Reference->data();
692
+ symm_workspace_.arguments.D = symm_workspace_.Reference->data();
693
+ symm_workspace_.arguments.alpha = problem_.alpha.data();
694
+ symm_workspace_.arguments.beta = problem_.beta.data();
695
+ symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
696
+
697
+ detail::cublasSymmDispatcher symm_op(
698
+ symm_desc,
699
+ symm_workspace_.configuration,
700
+ symm_workspace_.arguments
701
+ );
702
+
703
+ if (symm_op.status != Status::kSuccess) {
704
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
705
+ return true;
706
+ }
707
+
708
+ results_.back().status = Status::kSuccess;
709
+
710
+ status = symm_op(handle);
711
+
712
+ // Handle errors
713
+ if (status != CUBLAS_STATUS_SUCCESS) {
714
+
715
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
716
+ return true;
717
+ }
718
+
719
+ //
720
+ // Verify results
721
+ //
722
+
723
+ results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
724
+ options,
725
+ *symm_workspace_.Computed,
726
+ *symm_workspace_.Reference
727
+ );
728
+
729
+ // Save workspace if incorrect
730
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
731
+ results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
732
+
733
+ save_workspace(
734
+ device_context,
735
+ options,
736
+ symm_desc,
737
+ library::Provider::kCUTLASS,
738
+ library::Provider::kCUBLAS);
739
+ }
740
+ }
741
+ catch (...) {
742
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
743
+ }
744
+
745
+ #endif
746
+
747
+ // Return true means continue profiling
748
+ return true;
749
+ }
750
+
751
+ /////////////////////////////////////////////////////////////////////////////////////////////////
752
+
753
+ /// Measures performance results
754
+ bool SymmOperationProfiler::profile(
755
+ Options const &options,
756
+ PerformanceReport &report,
757
+ DeviceContext &device_context,
758
+ library::Operation const *operation,
759
+ ProblemSpace const &problem_space,
760
+ ProblemSpace::Problem const &problem) {
761
+
762
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
763
+
764
+ // Initialize structure containing Symm arguments
765
+ symm_workspace_.arguments.A = symm_workspace_.A->data();
766
+ symm_workspace_.arguments.B = symm_workspace_.B->data();
767
+ symm_workspace_.arguments.C = symm_workspace_.C->data();
768
+ symm_workspace_.arguments.D = symm_workspace_.Computed->data();
769
+ symm_workspace_.arguments.alpha = problem_.alpha.data();
770
+ symm_workspace_.arguments.beta = problem_.beta.data();
771
+ symm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
772
+
773
+ results_.back().status = profile_cutlass_(
774
+ results_.back().runtime,
775
+ options,
776
+ operation,
777
+ &symm_workspace_.arguments,
778
+ symm_workspace_.host_workspace.data(),
779
+ symm_workspace_.device_workspace.data()
780
+ );
781
+ }
782
+ return true;
783
+ }
784
+
785
+ /////////////////////////////////////////////////////////////////////////////////////////////////
786
+
787
+ } // namespace profiler
788
+ } // namespace cutlass
789
+
790
+ /////////////////////////////////////////////////////////////////////////////////////////////////
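Editorial aside (not part of the diff above): the worst-case disposition aggregation at the end of SymmOperationProfiler::verify_cutlass() reduces to a small rule that is easy to test in isolation. The sketch below is a hypothetical, standalone C++ restatement with placeholder enum and type names rather than the library's actual declarations; it mirrors the loop's logic only: any Failed or Incorrect provider dominates, otherwise a single Passed provider marks the run Passed, and with no passing provider the result stays NotVerified.

#include <map>

enum class Disposition { kNotRun, kNotVerified, kPassed, kIncorrect, kFailed };
enum class Provider { kCUBLAS, kReferenceDevice };

// Reduce per-provider verification outcomes to a single disposition (worst case wins).
inline Disposition aggregate(std::map<Provider, Disposition> const &verification_map) {
  bool any_passed = false;
  for (auto const &entry : verification_map) {
    if (entry.second == Disposition::kFailed || entry.second == Disposition::kIncorrect) {
      return entry.second;                         // a failing provider dominates
    }
    any_passed |= (entry.second == Disposition::kPassed);
  }
  return any_passed ? Disposition::kPassed : Disposition::kNotVerified;
}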
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/profiler/src/trmm_operation_profiler.cu ADDED
@@ -0,0 +1,728 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+ /* \file
32
+ \brief Execution environment
33
+
34
+
35
+ */
36
+
37
+ #include <iostream>
38
+ #include <stdexcept>
39
+ #include <iomanip>
40
+ #include <ios>
41
+
42
+ #include "cutlass/core_io.h"
43
+
44
+ #include "cutlass/profiler/cublas_helpers.h"
45
+ #include "cutlass/profiler/trmm_operation_profiler.h"
46
+ #include "cutlass/profiler/gpu_timer.h"
47
+
48
+ /////////////////////////////////////////////////////////////////////////////////////////////////
49
+
50
+ namespace cutlass {
51
+ namespace profiler {
52
+
53
+
54
+ /////////////////////////////////////////////////////////////////////////////////////////////////
55
+
56
+ /// Ctor
57
+ TrmmOperationProfiler::TrmmOperationProfiler(Options const &options):
58
+ OperationProfiler(
59
+ options,
60
+ library::OperationKind::kTrmm,
61
+ {
62
+ {ArgumentTypeID::kEnumerated, {"trmm_kind"}, "Variant of TRMM (universal)"},
63
+ {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the TRMM problem space"},
64
+ {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the TRMM problem space"},
65
+ {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
66
+ {ArgumentTypeID::kEnumerated, {"side_mode"}, "Side Mode for TRMM (left, right)"},
67
+ {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for TRMM (lower, upper)"},
68
+ {ArgumentTypeID::kEnumerated, {"diag_type"}, "Diag Type for TRMM (nonunit, unit)"},
69
+ {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"},
70
+ {ArgumentTypeID::kTensor, {"D"}, "Tensor storing the D operand"},
71
+ {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
72
+ {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
73
+ {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
74
+ {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of TRMMs computed in one batch"},
75
+ },
76
+ { library::Provider::kCUBLAS}
77
+ ) {
78
+ description_ = " Triangular Matrix-Multiplication. D = alpha * A * B or alpha * B * A";
79
+ }
80
+
81
+ /// Destructor
82
+ TrmmOperationProfiler::~TrmmOperationProfiler() {
83
+
84
+ }
85
+
86
+ /// Prints usage statement for the math function
87
+ void TrmmOperationProfiler::print_usage(std::ostream &out) const {
88
+ out << "TRMM" << "\n\n";
89
+
90
+ OperationProfiler::print_usage(out);
91
+ }
92
+
93
+ /// Prints examples
94
+ void TrmmOperationProfiler::print_examples(std::ostream &out) const {
95
+
96
+ out << "\nExamples:\n\n"
97
+ << "Profile a particular problem size:\n"
98
+ << " $ cutlass_profiler --operation=Trmm --n=1024 --m=128\n\n"
99
+
100
+ << "Schmoo over problem size and beta:\n"
101
+ << " $ cutlass_profiler --operation=Trmm --n=1024:4096:256 --m=128:8192:128 --beta=0,1,2.5\n\n"
102
+
103
+ << "Schmoo over accumulator types:\n"
104
+ << " $ cutlass_profiler --operation=Trmm --accumulator-type=f16,f32\n\n"
105
+
106
+    << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major, use row or t):\n"
107
+ << " $ cutlass_profiler --operation=Trmm --A=f16:column or --A=*:row\n\n"
108
+
109
+ << "Using various input value distribution:\n"
110
+ << " $ cutlass_profiler --operation=Trmm --dist=uniform,min:0,max:3\n"
111
+ << " $ cutlass_profiler --operation=Trmm --dist=gaussian,mean:0,stddev:3\n"
112
+ << " $ cutlass_profiler --operation=Trmm --dist=sequential,start:0,delta:1\n\n"
113
+
114
+ << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
115
+ << " $ cutlass_profiler --operation=Trmm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
116
+
117
+ << "Test your changes to trmm kernels with a quick functional test and save results in functional-test.csv:\n"
118
+ << " $ cutlass_profiler --operation=Trmm \\ \n"
119
+ << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
120
+ << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
121
+ << " --beta=0,1,2 --profiling-iterations=1 \\ \n"
122
+ << " --providers=cutlass --output=functional-test.csv\n\n";
123
+ }
124
+
125
+ /////////////////////////////////////////////////////////////////////////////////////////////////
126
+
127
+ #if 0
128
+ // used this for debugging
129
+ static std::string byte_string(std::vector<uint8_t> const &bytes) {
130
+ std::stringstream ss;
131
+
132
+ ss << "0x";
133
+
134
+ for (size_t idx = bytes.size(); idx > 0; --idx) {
135
+ ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
136
+ }
137
+
138
+ return ss.str();
139
+ }
140
+ #endif
141
+
142
+ Status TrmmOperationProfiler::TrmmProblem::parse(
143
+ library::TrmmDescription const &operation_desc,
144
+ ProblemSpace const &problem_space,
145
+ ProblemSpace::Problem const &problem) {
146
+
147
+ if (!arg_as_int(this->m, "m", problem_space, problem)) {
148
+ // default value
149
+ this->m = 1024;
150
+ }
151
+
152
+ if (!arg_as_int(this->n, "n", problem_space, problem)) {
153
+ // default value
154
+ this->n = 1024;
155
+ }
156
+
157
+ if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
158
+ // default value
159
+ this->split_k_slices = 1;
160
+ }
161
+
162
+ if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
163
+ // default value
164
+ this->batch_count = 1;
165
+ }
166
+
167
+ if (this->split_k_slices > 1 && this->batch_count > 1) {
168
+ // At least one of these must be one
169
+ return Status::kErrorInvalidProblem;
170
+ }
171
+
172
+ if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
173
+ return Status::kErrorInvalidProblem;
174
+ }
175
+
176
+ if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) {
177
+ return Status::kErrorInvalidProblem;
178
+ }
179
+
180
+ if (!tensor_description_satisfies(operation_desc.D, "D", problem_space, problem)) {
181
+ return Status::kErrorInvalidProblem;
182
+ }
183
+
184
+ if (!arg_as_scalar(
185
+ this->alpha,
186
+ operation_desc.element_epilogue,
187
+ "alpha",
188
+ problem_space,
189
+ problem)) {
190
+
191
+ if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
192
+ return Status::kErrorInternal;
193
+ }
194
+ }
195
+
196
+ if (!arg_as_scalar(
197
+ this->beta,
198
+ operation_desc.element_epilogue,
199
+ "beta",
200
+ problem_space,
201
+ problem)) {
202
+
203
+ if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
204
+ return Status::kErrorInternal;
205
+ }
206
+ }
207
+
208
+ if (operation_desc.side_mode == SideMode::kLeft) {
209
+ this->lda = DeviceAllocation::get_packed_layout(
210
+ operation_desc.A.layout, {int(this->m), int(this->m)}).front();
211
+ }
212
+ else if (operation_desc.side_mode == SideMode::kRight) {
213
+ this->lda = DeviceAllocation::get_packed_layout(
214
+ operation_desc.A.layout, {int(this->n), int(this->n)}).front();
215
+ }
216
+
217
+ this->ldb = DeviceAllocation::get_packed_layout(
218
+ operation_desc.B.layout, {int(this->m), int(this->n)}).front();
219
+
220
+ this->ldd = DeviceAllocation::get_packed_layout(
221
+ operation_desc.D.layout, {int(this->m), int(this->n)}).front();
222
+
223
+ return Status::kSuccess;
224
+ }
225
+
226
+ /// Initializes a performance result
227
+ void TrmmOperationProfiler::TrmmProblem::initialize_result(
228
+ PerformanceResult &result,
229
+ library::TrmmDescription const &operation_desc,
230
+ ProblemSpace const &problem_space) {
231
+
232
+ result.arguments.resize(problem_space.rank());
233
+
234
+ set_argument(result, "trmm_kind", problem_space, library::to_string(operation_desc.trmm_kind));
235
+
236
+ set_argument(result, "A", problem_space,
237
+ std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
238
+
239
+ set_argument(result, "side_mode", problem_space, library::to_string(operation_desc.side_mode));
240
+
241
+ set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
242
+
243
+ set_argument(result, "diag_type", problem_space, library::to_string(operation_desc.diag_type));
244
+
245
+ set_argument(result, "B", problem_space,
246
+ std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout));
247
+
248
+ set_argument(result, "D", problem_space,
249
+ std::string(library::to_string(operation_desc.D.element)) + ":" + library::to_string(operation_desc.D.layout));
250
+
251
+ set_argument(result, "m", problem_space, m);
252
+ set_argument(result, "n", problem_space, n);
253
+
254
+ set_argument(result, "split_k_slices", problem_space, split_k_slices);
255
+ set_argument(result, "batch_count", problem_space, batch_count);
256
+
257
+ set_argument(result, "alpha", problem_space,
258
+ library::lexical_cast(alpha, operation_desc.element_epilogue));
259
+
260
+ set_argument(result, "beta", problem_space,
261
+ library::lexical_cast(beta, operation_desc.element_epilogue));
262
+ }
263
+
264
+ /////////////////////////////////////////////////////////////////////////////////////////////////
265
+
266
+ /// Extracts the problem dimensions
267
+ Status TrmmOperationProfiler::initialize_configuration(
268
+ Options const &options,
269
+ PerformanceReport &report,
270
+ DeviceContext &device_context,
271
+ library::Operation const *operation,
272
+ ProblemSpace const &problem_space,
273
+ ProblemSpace::Problem const &problem) {
274
+
275
+ library::TrmmDescription const &operation_desc =
276
+ static_cast<library::TrmmDescription const &>(operation->description());
277
+
278
+ if (operation_desc.trmm_kind != library::TrmmKind::kUniversal) {
279
+ return Status::kErrorInvalidProblem;
280
+ }
281
+
282
+ Status status = problem_.parse(operation_desc, problem_space, problem);
283
+
284
+ if (status != Status::kSuccess) {
285
+ return status;
286
+ }
287
+
288
+ trmm_workspace_.configuration.problem_size.m() = int(problem_.m);
289
+ trmm_workspace_.configuration.problem_size.n() = int(problem_.n);
290
+ trmm_workspace_.configuration.problem_size.k() = (operation_desc.side_mode == SideMode::kLeft)
291
+ ? int(problem_.m) : int(problem_.n);
292
+ trmm_workspace_.configuration.lda = problem_.lda;
293
+ trmm_workspace_.configuration.ldb = problem_.ldb;
294
+ trmm_workspace_.configuration.ldd = problem_.ldd;
295
+ //trmm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
296
+ trmm_workspace_.configuration.batch_count = int(problem_.split_k_slices);
297
+
298
+ trmm_workspace_.arguments.A = nullptr;
299
+ trmm_workspace_.arguments.B = nullptr;
300
+ trmm_workspace_.arguments.D = nullptr;
301
+ trmm_workspace_.arguments.alpha = problem_.alpha.data();
302
+ trmm_workspace_.arguments.beta = problem_.beta.data();
303
+ trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
304
+
305
+ initialize_result_(this->model_result_, options, operation_desc, problem_space);
306
+
307
+ return operation->can_implement(&trmm_workspace_.configuration, &trmm_workspace_.arguments);
308
+ }
309
+
310
+ /// Initializes the performance result
311
+ void TrmmOperationProfiler::initialize_result_(
312
+ PerformanceResult &result,
313
+ Options const &options,
314
+ library::TrmmDescription const &operation_desc,
315
+ ProblemSpace const &problem_space) {
316
+
317
+ result.provider = library::Provider::kCUTLASS;
318
+ result.disposition = Disposition::kNotRun;
319
+ result.status = Status::kSuccess;
320
+ result.operation_name = operation_desc.name;
321
+
322
+ problem_.initialize_result(result, operation_desc, problem_space);
323
+
324
+ OperationProfiler::initialize_result_(result, operation_desc, problem_space);
325
+
326
+ if (operation_desc.side_mode == SideMode::kLeft) {
327
+ // Input bytes read and Output bytes written for the trmm problem
328
+ result.bytes =
329
+ // Half matrix including the diagonal will have (M*(M+1))/2 elements
330
+ int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * (problem_.m + 1) / 2 +
331
+ int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n +
332
+ int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n;
333
+ } else if (operation_desc.side_mode == SideMode::kRight) {
334
+ // Input bytes read and Output bytes written for the trmm problem
335
+ result.bytes =
336
+ // Half matrix including the diagonal will have (N*(N+1))/2 elements
337
+ int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.n / 8) * (problem_.n + 1) / 2 +
338
+ int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.m / 8) * problem_.n +
339
+ int64_t(library::sizeof_bits(operation_desc.D.element) * problem_.m / 8) * problem_.n;
340
+ }
341
+
342
+ // FLOPs = 2 * [ ( M * (M+1)/2 * N ) ] // Beta is zero
343
+ result.flops = problem_.m * (problem_.m + 1) * problem_.n;
344
+
345
+ result.runtime = 0;
346
+
347
+ // complex-valued support
348
+ switch (operation_desc.tile_description.math_instruction.math_operation) {
349
+ case library::MathOperationID::kMultiplyAddComplex:
350
+ result.flops *= 4;
351
+ break;
352
+
353
+ case library::MathOperationID::kMultiplyAddComplexFastF32:
354
+ result.flops *= 4;
355
+ break;
356
+
357
+ default: break;
358
+ }
359
+
360
+ }
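Editorial aside (not part of the diff above): a quick worked check of the byte and FLOP model in initialize_result_(), assuming a left-side TRMM with M = N = 1024 and 8-byte (f64) elements. Only the touched triangle of A is counted, so bytes = 8*M*(M+1)/2 + 8*M*N + 8*M*N = 4,198,400 + 8,388,608 + 8,388,608 ≈ 21.0 MB, and flops = M*(M+1)*N = 1,074,790,400 ≈ 1.07 GFLOP, which matches the comment's 2 * (M*(M+1)/2) * N because the factor of two cancels the division by two.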
361
+
362
+ /// Initializes workspace
363
+ Status TrmmOperationProfiler::initialize_workspace(
364
+ Options const &options,
365
+ PerformanceReport &report,
366
+ DeviceContext &device_context,
367
+ library::Operation const *operation,
368
+ ProblemSpace const &problem_space,
369
+ ProblemSpace::Problem const &problem) {
370
+
371
+ if (options.device.devices.size() != 1) {
372
+ throw std::runtime_error("This operation profiler only supports a single "
373
+ "device.");
374
+ }
375
+
376
+ cudaError_t result;
377
+ result = cudaSetDevice(options.device.device_id(0));
378
+ if (result != cudaSuccess) {
379
+ throw std::runtime_error("cudaSetDevice() failed.");
380
+ }
381
+
382
+ library::TrmmDescription const &operation_desc =
383
+ static_cast<library::TrmmDescription const &>(operation->description());
384
+
385
+ if (options.execution_mode != ExecutionMode::kDryRun) {
386
+ int seed_shift = 0;
387
+ if (operation_desc.side_mode == SideMode::kLeft) {
388
+ trmm_workspace_.A = device_context.allocate_and_initialize_tensor(
389
+ options,
390
+ "A",
391
+ operation_desc.A.element,
392
+ operation_desc.A.layout,
393
+ {int(problem_.m), int(problem_.m)},
394
+ {int(problem_.lda)},
395
+ 1, // batch_count
396
+ seed_shift++,
397
+ 0 // device_index
398
+ );
399
+ } else if (operation_desc.side_mode == SideMode::kRight) {
400
+ trmm_workspace_.A = device_context.allocate_and_initialize_tensor(
401
+ options,
402
+ "A",
403
+ operation_desc.A.element,
404
+ operation_desc.A.layout,
405
+ {int(problem_.n), int(problem_.n)},
406
+ {int(problem_.lda)},
407
+ 1, // batch_count
408
+ seed_shift++,
409
+ 0 // device_index
410
+ );
411
+ }
412
+
413
+ trmm_workspace_.B = device_context.allocate_and_initialize_tensor(
414
+ options,
415
+ "B",
416
+ operation_desc.B.element,
417
+ operation_desc.B.layout,
418
+ {int(problem_.m), int(problem_.n)},
419
+ {int(problem_.ldb)},
420
+ 1, // batch_count
421
+ seed_shift++,
422
+ 0 // device_index
423
+ );
424
+
425
+ trmm_workspace_.Computed = device_context.allocate_tensor(
426
+ options,
427
+ "D",
428
+ operation_desc.D.element,
429
+ operation_desc.D.layout,
430
+ {int(problem_.m), int(problem_.n)},
431
+ {int(problem_.ldd)},
432
+ 1, // batch_count
433
+ 0 // device_index
434
+ );
435
+
436
+ trmm_workspace_.Reference = device_context.allocate_tensor(
437
+ options,
438
+ "Reference",
439
+ operation_desc.D.element,
440
+ operation_desc.D.layout,
441
+ {int(problem_.m), int(problem_.n)},
442
+ {int(problem_.ldd)},
443
+ 1, // batch_count
444
+ 0 // device_index
445
+ );
446
+
447
+ }
448
+
449
+ //
450
+ // Initialize the CUTLASS operation
451
+ //
452
+ Status status = Status::kSuccess;
453
+
454
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
455
+
456
+ if (options.execution_mode != ExecutionMode::kDryRun) {
457
+
458
+ uint64_t workspace_size = operation->get_host_workspace_size(&trmm_workspace_.configuration);
459
+ trmm_workspace_.host_workspace.resize(workspace_size, 0);
460
+
461
+ workspace_size = operation->get_device_workspace_size(&trmm_workspace_.configuration);
462
+ trmm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
463
+
464
+ status = operation->initialize(
465
+ &trmm_workspace_.configuration,
466
+ trmm_workspace_.host_workspace.data(),
467
+ trmm_workspace_.device_workspace.data());
468
+ }
469
+
470
+ //
471
+ // If CUTLASS is enabled, generate a result for it
472
+ //
473
+ results_.push_back(model_result_);
474
+ results_.back().provider = library::Provider::kCUTLASS;
475
+ results_.back().op_kind = library::OperationKind::kTrmm;
476
+ results_.back().disposition = Disposition::kNotRun;
477
+
478
+ for(auto provider : verification_providers_) {
479
+ results_.back().verification_map[provider] = Disposition::kNotRun;
480
+ }
481
+ }
482
+
483
+ return status;
484
+ }
485
+
486
+ /////////////////////////////////////////////////////////////////////////////////////////////////
487
+
488
+ /// Verifies CUTLASS against references
489
+ bool TrmmOperationProfiler::verify_cutlass(
490
+ Options const &options,
491
+ PerformanceReport &report,
492
+ DeviceContext &device_context,
493
+ library::Operation const *operation,
494
+ ProblemSpace const &problem_space,
495
+ ProblemSpace::Problem const &problem) {
496
+
497
+ if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
498
+ return true;
499
+ }
500
+
501
+ if (options.execution_mode == ExecutionMode::kDryRun) {
502
+ return true;
503
+ }
504
+
505
+ // Initialize structure containing TRMM arguments
506
+ trmm_workspace_.arguments.A = trmm_workspace_.A->data();
507
+ trmm_workspace_.arguments.B = trmm_workspace_.B->data();
508
+ trmm_workspace_.arguments.D = trmm_workspace_.Computed->data();
509
+ trmm_workspace_.arguments.alpha = problem_.alpha.data();
510
+ trmm_workspace_.arguments.beta = problem_.beta.data();
511
+ trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
512
+
513
+ //
514
+ // Run the CUTLASS operation
515
+ //
516
+
517
+ results_.back().status = operation->run(
518
+ &trmm_workspace_.arguments,
519
+ trmm_workspace_.host_workspace.data(),
520
+ trmm_workspace_.device_workspace.data());
521
+
522
+ if (results_.back().status != Status::kSuccess) {
523
+ results_.back().disposition = Disposition::kFailed;
524
+ return false;
525
+ }
526
+
527
+ cudaError_t result = cudaDeviceSynchronize();
528
+ if (result != cudaSuccess) {
529
+ results_.back().disposition = Disposition::kFailed;
530
+ return false;
531
+ }
532
+
533
+ // CUTLASS op ran the but not yet verified against any verification provider
534
+ results_.back().disposition = Disposition::kNotVerified;
535
+
536
+ //
537
+ // Run verification providers
538
+ //
539
+
540
+ if (options.verification.enabled) {
541
+
542
+ #if CUTLASS_ENABLE_CUBLAS
543
+ if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
544
+
545
+ // Guard against unsupported cases
546
+ auto const & trmm_desc = static_cast<library::TrmmDescription const &>(operation->description());
547
+
548
+ if (cublas_satisfies(trmm_desc) == Status::kSuccess) {
549
+
550
+ // call cublas verification if supported
551
+ verify_with_cublas_(
552
+ options,
553
+ report,
554
+ device_context,
555
+ operation,
556
+ problem_space,
557
+ problem);
558
+ }
559
+
560
+ else {
561
+ // set verification map for cublas to not supported
562
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
563
+ }
564
+ }
565
+ #endif // #if CUTLASS_ENABLE_CUBLAS
566
+
567
+ // Update disposition to worst case verification outcome among all
568
+ // verification providers which are supported
569
+ bool is_any_verification_run_passed = false;
570
+ for(auto &m : results_.back().verification_map) {
571
+ if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
572
+ results_.back().disposition = m.second;
573
+ return true;
574
+ }
575
+ if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
576
+ is_any_verification_run_passed = true;
577
+ }
578
+ }
579
+
580
+ if(is_any_verification_run_passed) {
581
+ results_.back().disposition = Disposition::kPassed;
582
+ }
583
+ }
584
+
585
+ // Return true means continue profiling
586
+ return true;
587
+ }
588
+
589
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
590
+
591
+ /// Verifies CUTLASS against references
592
+ bool TrmmOperationProfiler::verify_with_cublas_(
593
+ Options const &options,
594
+ PerformanceReport &report,
595
+ DeviceContext &device_context,
596
+ library::Operation const *operation,
597
+ ProblemSpace const &problem_space,
598
+ ProblemSpace::Problem const &problem) {
599
+
600
+
601
+ #if CUTLASS_ENABLE_CUBLAS
602
+
603
+ library::TrmmDescription const &trmm_desc =
604
+ static_cast<library::TrmmDescription const &>(operation->description());
605
+
606
+ //
607
+ // Construct cuBLAS operators
608
+ //
609
+
610
+ CublasCreate handle;
611
+ cublasStatus_t status = handle.get_cublas_create_status();
612
+
613
+ if (status != CUBLAS_STATUS_SUCCESS) {
614
+
615
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
616
+ return true;
617
+ }
618
+
619
+ //
620
+ // Initialize state
621
+ //
622
+
623
+ try {
624
+
625
+ //
626
+ // Construct dispatcher to cublas<t>Trmm()
627
+ //
628
+
629
+ // Initialize structure containing TRMM arguments
630
+ trmm_workspace_.arguments.A = trmm_workspace_.A->data();
631
+ trmm_workspace_.arguments.B = trmm_workspace_.B->data();
632
+ trmm_workspace_.arguments.D = trmm_workspace_.Reference->data();
633
+ trmm_workspace_.arguments.alpha = problem_.alpha.data();
634
+ trmm_workspace_.arguments.beta = problem_.beta.data();
635
+ trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
636
+
637
+ detail::cublasTrmmDispatcher trmm_op(
638
+ trmm_desc,
639
+ trmm_workspace_.configuration,
640
+ trmm_workspace_.arguments
641
+ );
642
+
643
+ if (trmm_op.status != Status::kSuccess) {
644
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
645
+ return true;
646
+ }
647
+
648
+ results_.back().status = Status::kSuccess;
649
+
650
+ status = trmm_op(handle);
651
+
652
+ // Handle errors
653
+ if (status != CUBLAS_STATUS_SUCCESS) {
654
+
655
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
656
+ return true;
657
+ }
658
+
659
+ //
660
+ // Verify results
661
+ //
662
+ results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
663
+ options,
664
+ *trmm_workspace_.Computed,
665
+ *trmm_workspace_.Reference
666
+ );
667
+
668
+ // Save workspace if incorrect
669
+ if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
670
+ results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
671
+
672
+ save_workspace(
673
+ device_context,
674
+ options,
675
+ trmm_desc,
676
+ library::Provider::kCUTLASS,
677
+ library::Provider::kCUBLAS);
678
+ }
679
+ }
680
+ catch (...) {
681
+ results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
682
+ }
683
+
684
+ #endif
685
+
686
+ // Return true means continue profiling
687
+ return true;
688
+ }
689
+
690
+ /////////////////////////////////////////////////////////////////////////////////////////////////
691
+
692
+ /// Measures performance results
693
+ bool TrmmOperationProfiler::profile(
694
+ Options const &options,
695
+ PerformanceReport &report,
696
+ DeviceContext &device_context,
697
+ library::Operation const *operation,
698
+ ProblemSpace const &problem_space,
699
+ ProblemSpace::Problem const &problem) {
700
+
701
+ if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
702
+
703
+ // Initialize structure containing TRMM arguments
704
+ trmm_workspace_.arguments.A = trmm_workspace_.A->data();
705
+ trmm_workspace_.arguments.B = trmm_workspace_.B->data();
706
+ trmm_workspace_.arguments.D = trmm_workspace_.Computed->data();
707
+ trmm_workspace_.arguments.alpha = problem_.alpha.data();
708
+ trmm_workspace_.arguments.beta = problem_.beta.data();
709
+ trmm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
710
+
711
+ results_.back().status = profile_cutlass_(
712
+ results_.back().runtime,
713
+ options,
714
+ operation,
715
+ &trmm_workspace_.arguments,
716
+ trmm_workspace_.host_workspace.data(),
717
+ trmm_workspace_.device_workspace.data()
718
+ );
719
+ }
720
+ return true;
721
+ }
722
+
723
+ /////////////////////////////////////////////////////////////////////////////////////////////////
724
+
725
+ } // namespace profiler
726
+ } // namespace cutlass
727
+
728
+ /////////////////////////////////////////////////////////////////////////////////////////////////
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/util/include/cutlass/util/GPU_Clock.hpp ADDED
@@ -0,0 +1,67 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ #pragma once
33
+
34
+ #include <cuda_runtime.h>
35
+
36
+ struct GPU_Clock
37
+ {
38
+ GPU_Clock() {
39
+ cudaEventCreate(&start_);
40
+ cudaEventCreate(&stop_);
41
+ cudaEventRecord(start_);
42
+ }
43
+
44
+ ~GPU_Clock() {
45
+ cudaEventDestroy(start_);
46
+ cudaEventDestroy(stop_);
47
+ }
48
+
49
+ void start() {
50
+ cudaEventRecord(start_);
51
+ }
52
+
53
+ float milliseconds() {
54
+ cudaEventRecord(stop_);
55
+ cudaEventSynchronize(stop_);
56
+ float time;
57
+ cudaEventElapsedTime(&time, start_, stop_);
58
+ return time;
59
+ }
60
+
61
+ float seconds() {
62
+ return milliseconds() * float(1e-3);
63
+ }
64
+
65
+ private:
66
+ cudaEvent_t start_, stop_;
67
+ };
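Editorial aside (not part of the file above): a minimal usage sketch for GPU_Clock. The kernel name and launch configuration are placeholders, and the include path assumes this header's location in the tree shown above.

#include <cstdio>
#include "cutlass/util/GPU_Clock.hpp"

__global__ void my_kernel() { /* placeholder workload */ }

int main() {
  GPU_Clock timer;                     // constructor records an initial start event
  timer.start();                       // reset the start event just before the timed region
  my_kernel<<<1, 32>>>();              // work to be timed (placeholder launch)
  float ms = timer.milliseconds();     // records the stop event, synchronizes, returns elapsed time
  printf("elapsed: %f ms (%f s)\n", ms, ms * 1e-3f);
  return 0;
}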
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/util/include/cutlass/util/command_line.h ADDED
@@ -0,0 +1,313 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ ******************************************************************************/
31
+
32
+ #pragma once
33
+
34
+ /**
35
+ * \file
36
+ * Utility for parsing command line arguments
37
+ */
38
+
39
+ #include <iostream>
40
+ #include <limits>
41
+ #include <sstream>
42
+ #include <string>
43
+ #include <vector>
44
+
45
+ #include <cuda_runtime.h>
46
+
47
+ #include "cutlass/cutlass.h"
48
+
49
+ namespace cutlass {
50
+
51
+ /******************************************************************************
52
+ * command_line
53
+ ******************************************************************************/
54
+
55
+ /**
56
+ * Utility for parsing command line arguments
57
+ */
58
+ struct CommandLine {
59
+ std::vector<std::string> keys;
60
+ std::vector<std::string> values;
61
+ std::vector<std::string> args;
62
+
63
+ /**
64
+ * Constructor
65
+ */
66
+ CommandLine(int argc, const char** argv) {
67
+ using namespace std;
68
+
69
+ for (int i = 1; i < argc; i++) {
70
+ string arg = argv[i];
71
+
72
+ if ((arg[0] != '-') || (arg[1] != '-')) {
73
+ args.push_back(arg);
74
+ continue;
75
+ }
76
+
77
+ string::size_type pos;
78
+ string key, val;
79
+ if ((pos = arg.find('=')) == string::npos) {
80
+ key = string(arg, 2, arg.length() - 2);
81
+ val = "";
82
+ } else {
83
+ key = string(arg, 2, pos - 2);
84
+ val = string(arg, pos + 1, arg.length() - 1);
85
+ }
86
+
87
+ keys.push_back(key);
88
+ values.push_back(val);
89
+ }
90
+ }
91
+
92
+ /**
93
+ * Checks whether a flag "--<flag>" is present in the commandline
94
+ */
95
+ bool check_cmd_line_flag(const char* arg_name) const {
96
+ using namespace std;
97
+
98
+ for (int i = 0; i < int(keys.size()); ++i) {
99
+ if (keys[i] == string(arg_name)) return true;
100
+ }
101
+ return false;
102
+ }
103
+
104
+ /**
105
+ * Returns number of naked (non-flag and non-key-value) commandline parameters
106
+ */
107
+ size_t num_naked_args() const {
108
+ return args.size();
109
+ }
110
+
111
+ /**
112
+ * Print naked (non-flag and non-key-value) commandline parameters
113
+ */
114
+ void print_naked_args(std::ostream &out) const {
115
+ for (auto arg : args) {
116
+ out << " " << arg <<"\n";
117
+ }
118
+ }
119
+
120
+ /**
121
+ * Returns the commandline parameter for a given index (not including flags)
122
+ */
123
+ template <typename value_t>
124
+ void get_cmd_line_argument(size_t index, value_t& val) const {
125
+ using namespace std;
126
+ if (index < args.size()) {
127
+ istringstream str_stream(args[index]);
128
+ str_stream >> val;
129
+ }
130
+ }
131
+
132
+ /**
133
+ * Obtains the boolean value specified for a given commandline parameter --<flag>=<bool>
134
+ */
135
+ void get_cmd_line_argument(const char* arg_name, bool& val, bool _default) const {
136
+ val = _default;
137
+ if (check_cmd_line_flag(arg_name)) {
138
+ std::string value;
139
+ get_cmd_line_argument(arg_name, value);
140
+
141
+ val = !(value == "0" || value == "false");
142
+ }
143
+ }
144
+
145
+ /**
146
+ * Obtains the value specified for a given commandline parameter --<flag>=<value>
147
+ */
148
+ template <typename value_t>
149
+ void get_cmd_line_argument(const char* arg_name,
150
+ value_t& val) const {
151
+
152
+ get_cmd_line_argument(arg_name, val, val);
153
+ }
154
+
155
+ /**
156
+ * Obtains the value specified for a given commandline parameter --<flag>=<value>
157
+ */
158
+ template <typename value_t>
159
+ void get_cmd_line_argument(const char* arg_name,
160
+ value_t& val,
161
+ value_t const& _default) const {
162
+ using namespace std;
163
+
164
+ val = _default;
165
+
166
+ for (int i = 0; i < int(keys.size()); ++i) {
167
+ if (keys[i] == string(arg_name)) {
168
+ istringstream str_stream(values[i]);
169
+ str_stream >> val;
170
+ }
171
+ }
172
+ }
173
+
174
+ /**
175
+ * Returns the values specified for a given commandline parameter --<flag>=<value>,<value>*
176
+ */
177
+ template <typename value_t>
178
+ void get_cmd_line_arguments(const char* arg_name,
179
+ std::vector<value_t>& vals,
180
+ char sep = ',') const {
181
+ using namespace std;
182
+
183
+ if (check_cmd_line_flag(arg_name)) {
184
+ // Clear any default values
185
+ vals.clear();
186
+
187
+ // Recover from multi-value string
188
+ for (size_t i = 0; i < keys.size(); ++i) {
189
+ if (keys[i] == string(arg_name)) {
190
+ string val_string(values[i]);
191
+ separate_string(val_string, vals, sep);
192
+ }
193
+ }
194
+ }
195
+ }
196
+
197
+ /**
198
+ * Returns the values specified for a given commandline parameter
199
+ * --<flag>=<value>,<value_start:value_end>*
200
+   * --<flag>=<key:value>,<key:value>*
201
+ void get_cmd_line_argument_pairs(const char* arg_name,
202
+ std::vector<std::pair<std::string, std::string> >& tokens,
203
+ char delim = ',',
204
+ char sep = ':') const {
205
+ if (check_cmd_line_flag(arg_name)) {
206
+ std::string value;
207
+ get_cmd_line_argument(arg_name, value);
208
+
209
+ tokenize(tokens, value, delim, sep);
210
+ }
211
+ }
212
+
213
+ /**
214
+ * Returns a list of ranges specified for a given commandline parameter
215
+   * --<flag>=<value>,<value_start:value_end>*
216
+ */
217
+ void get_cmd_line_argument_ranges(const char* arg_name,
218
+ std::vector<std::vector<std::string> >& vals,
219
+ char delim = ',',
220
+ char sep = ':') const {
221
+ std::vector<std::string> ranges;
222
+ get_cmd_line_arguments(arg_name, ranges, delim);
223
+
224
+ for (std::vector<std::string>::const_iterator range = ranges.begin();
225
+ range != ranges.end(); ++range) {
226
+
227
+ std::vector<std::string> range_vals;
228
+ separate_string(*range, range_vals, sep);
229
+ vals.push_back(range_vals);
230
+ }
231
+ }
232
+
233
+ /**
234
+ * The number of pairs parsed
235
+ */
236
+ int parsed_argc() const { return (int)keys.size(); }
237
+
238
+ //-------------------------------------------------------------------------
239
+ // Utility functions
240
+ //-------------------------------------------------------------------------
241
+
242
+ /// Tokenizes a comma-delimited list of string pairs delimited by ':'
243
+ static void tokenize(std::vector<std::pair<std::string, std::string> >& tokens,
244
+ std::string const& str,
245
+ char delim = ',',
246
+ char sep = ':') {
247
+ // Home-built to avoid Boost dependency
248
+ size_t s_idx = 0;
249
+ size_t d_idx = std::string::npos;
250
+ while (s_idx < str.size()) {
251
+ d_idx = str.find_first_of(delim, s_idx);
252
+
253
+ size_t end_idx = (d_idx != std::string::npos ? d_idx : str.size());
254
+ size_t sep_idx = str.find_first_of(sep, s_idx);
255
+ size_t offset = 1;
256
+ if (sep_idx == std::string::npos || sep_idx >= end_idx) {
257
+ sep_idx = end_idx;
258
+ offset = 0;
259
+ }
260
+
261
+ std::pair<std::string, std::string> item(
262
+ str.substr(s_idx, sep_idx - s_idx),
263
+ str.substr(sep_idx + offset, end_idx - sep_idx - offset));
264
+
265
+ tokens.push_back(item);
266
+ s_idx = end_idx + 1;
267
+ }
268
+ }
269
+
270
+   /// Tokenizes a comma-delimited list of ':'-separated pairs, keeping only the token before each ':' separator
271
+ static void tokenize(std::vector<std::string>& tokens,
272
+ std::string const& str,
273
+ char delim = ',',
274
+ char sep = ':') {
275
+ typedef std::vector<std::pair<std::string, std::string> > TokenVector;
276
+ typedef TokenVector::const_iterator token_iterator;
277
+
278
+ std::vector<std::pair<std::string, std::string> > token_pairs;
279
+ tokenize(token_pairs, str, delim, sep);
280
+ for (token_iterator tok = token_pairs.begin(); tok != token_pairs.end(); ++tok) {
281
+ tokens.push_back(tok->first);
282
+ }
283
+ }
284
+
285
+ template <typename value_t>
286
+ static void separate_string(std::string const& str,
287
+ std::vector<value_t>& vals,
288
+ char sep = ',') {
289
+ std::istringstream str_stream(str);
290
+ std::string::size_type old_pos = 0;
291
+ std::string::size_type new_pos = 0;
292
+
293
+ // Iterate <sep>-delimited values
294
+ value_t val;
295
+ while ((new_pos = str.find(sep, old_pos)) != std::string::npos) {
296
+ if (new_pos != old_pos) {
297
+ str_stream.width(new_pos - old_pos);
298
+ str_stream >> val;
299
+ vals.push_back(val);
300
+ }
301
+
302
+ // skip over delimiter
303
+ str_stream.ignore(1);
304
+ old_pos = new_pos + 1;
305
+ }
306
+
307
+ // Read last value
308
+ str_stream >> val;
309
+ vals.push_back(val);
310
+ }
311
+ };
312
+
313
+ } // namespace cutlass
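Editorial aside (not part of the file above): a minimal usage sketch for cutlass::CommandLine. The flag names and defaults are illustrative only, and the include path assumes this header's location in the tree shown above.

#include <cstdio>
#include <string>
#include <vector>
#include "cutlass/util/command_line.h"

int main(int argc, char const **argv) {
  cutlass::CommandLine cmd(argc, argv);

  bool help = false;
  cmd.get_cmd_line_argument("help", help, false);           // --help or --help=true

  int m = 0;
  cmd.get_cmd_line_argument("m", m, 1024);                   // --m=4096, defaults to 1024

  std::vector<int> batch_sizes;                              // --batch-sizes=1,8,64
  cmd.get_cmd_line_arguments("batch-sizes", batch_sizes);

  std::printf("m=%d, %zu batch sizes, help=%d\n", m, batch_sizes.size(), int(help));
  return 0;
}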
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/util/include/cutlass/util/cublas_wrappers.hpp ADDED
@@ -0,0 +1,526 @@
+ /***************************************************************************************************
+ * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+
+ #pragma once
+
+ #include <cuda_runtime.h>
+ #include <cublas_v2.h>
+
+ //-- BLAM_DEBUG_OUT ---------------------------------------------------------
+ #ifdef BLAM_DEBUG
+ # include <iostream>
+ # ifndef BLAM_DEBUG_OUT
+ # define BLAM_DEBUG_OUT(msg) std::cerr << "BLAM: " << msg << std::endl
+ # define BLAM_DEBUG_OUT_2(msg) std::cerr << msg << std::endl
+ # endif // BLAM_DEBUG_OUT
+ #else
+ # ifndef BLAM_DEBUG_OUT
+ # define BLAM_DEBUG_OUT(msg)
+ # define BLAM_DEBUG_OUT_2(msg)
+ # endif // BLAM_DEBUG_OUT
+ #endif // BLAM_DEBUG
+
+ // User could potentially define ComplexFloat/ComplexDouble instead of std::
+ #ifndef BLAM_COMPLEX_TYPES
+ #define BLAM_COMPLEX_TYPES 1
+ #include <cuda/std/complex>
+ namespace blam {
+ template <typename T>
+ using Complex = cuda::std::complex<T>;
+ using ComplexFloat = cuda::std::complex<float>;
+ using ComplexDouble = cuda::std::complex<double>;
+ }
+ #endif // BLAM_COMPLEX_TYPES
+
+ // User could potentially define Half instead of cute::
+ #ifndef BLAM_HALF_TYPE
+ #define BLAM_HALF_TYPE 1
+ #include <cute/numeric/numeric_types.hpp>
+ namespace blam {
+ using Half = cute::half_t;
+ }
+ #endif // BLAM_HALF_TYPE
+
+ namespace blam
+ {
+ namespace cublas
+ {
+
+ inline const char*
+ cublas_get_error(cublasStatus_t status)
+ {
+ switch (status) {
+ case CUBLAS_STATUS_SUCCESS:
+ return "CUBLAS_STATUS_SUCCESS";
+ case CUBLAS_STATUS_NOT_INITIALIZED:
+ return "CUBLAS_STATUS_NOT_INITIALIZED -- The cuBLAS library was not initialized.";
+ case CUBLAS_STATUS_ALLOC_FAILED:
+ return "CUBLAS_STATUS_ALLOC_FAILED -- Resource allocation failed inside the cuBLAS library.";
+ case CUBLAS_STATUS_INVALID_VALUE:
+ return "CUBLAS_STATUS_INVALID_VALUE -- An unsupported value or parameter was passed to the function.";
+ case CUBLAS_STATUS_ARCH_MISMATCH:
+ return "CUBLAS_STATUS_ARCH_MISMATCH -- The function requires a feature absent from the device architecture.";
+ case CUBLAS_STATUS_MAPPING_ERROR:
+ return "CUBLAS_STATUS_MAPPING_ERROR -- An access to GPU memory space failed.";
+ case CUBLAS_STATUS_EXECUTION_FAILED:
+ return "CUBLAS_STATUS_EXECUTION_FAILED -- The GPU program failed to execute.";
+ case CUBLAS_STATUS_INTERNAL_ERROR:
+ return "CUBLAS_STATUS_INTERNAL_ERROR -- An internal cuBLAS operation failed.";
+ case CUBLAS_STATUS_NOT_SUPPORTED:
+ return "CUBLAS_STATUS_NOT_SUPPORTED -- The functionality requested is not supported.";
+ case CUBLAS_STATUS_LICENSE_ERROR:
+ return "CUBLAS_STATUS_LICENSE_ERROR -- An error was detected when checking the current licensing.";
+ default:
+ return "CUBLAS_ERROR -- <unknown>";
+ }
+ }
+
+ inline bool
+ cublas_is_error(cublasStatus_t status)
+ {
+ return status != CUBLAS_STATUS_SUCCESS;
+ }
+
+
+ // hgemm
+ inline cublasStatus_t
+ gemm(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const Half* alpha,
+ const Half* A, int ldA,
+ const Half* B, int ldB,
+ const Half* beta,
+ Half* C, int ldC)
+ {
+ BLAM_DEBUG_OUT("cublasHgemm");
+
+ return cublasGemmEx(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const __half*>(alpha),
+ reinterpret_cast<const __half*>(A), CUDA_R_16F, ldA,
+ reinterpret_cast<const __half*>(B), CUDA_R_16F, ldB,
+ reinterpret_cast<const __half*>(beta),
+ reinterpret_cast< __half*>(C), CUDA_R_16F, ldC,
+ CUDA_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
+ }
+
+ // mixed hf gemm
+ inline cublasStatus_t
+ gemm(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const float* alpha,
+ const Half* A, int ldA,
+ const Half* B, int ldB,
+ const float* beta,
+ float* C, int ldC)
+ {
+ BLAM_DEBUG_OUT("cublasGemmEx mixed half-float");
+
+ return cublasGemmEx(handle, transA, transB,
+ m, n, k,
+ alpha,
+ reinterpret_cast<const __half*>(A), CUDA_R_16F, ldA,
+ reinterpret_cast<const __half*>(B), CUDA_R_16F, ldB,
+ beta,
+ C, CUDA_R_32F, ldC,
+ CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
+ }
+
+ // igemm
+ inline cublasStatus_t
+ gemm(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const int32_t* alpha,
+ const int8_t* A, int ldA,
+ const int8_t* B, int ldB,
+ const int32_t* beta,
+ int32_t* C, int ldC)
+ {
+ BLAM_DEBUG_OUT("cublasIgemm");
+
+ return cublasGemmEx(handle, transA, transB,
+ m, n, k,
+ alpha,
+ A, CUDA_R_8I, ldA,
+ B, CUDA_R_8I, ldB,
+ beta,
+ C, CUDA_R_32I, ldC,
+ CUDA_R_32I, CUBLAS_GEMM_DEFAULT_TENSOR_OP);
+ }
+
+ // sgemm
+ inline cublasStatus_t
+ gemm(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const float* alpha,
+ const float* A, int ldA,
+ const float* B, int ldB,
+ const float* beta,
+ float* C, int ldC)
+ {
+ BLAM_DEBUG_OUT("cublasSgemm");
+
+ return cublasSgemm(handle, transA, transB,
+ m, n, k,
+ alpha,
+ A, ldA,
+ B, ldB,
+ beta,
+ C, ldC);
+ }
+
+ // dgemm
+ inline cublasStatus_t
+ gemm(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const double* alpha,
+ const double* A, int ldA,
+ const double* B, int ldB,
+ const double* beta,
+ double* C, int ldC)
+ {
+ BLAM_DEBUG_OUT("cublasDgemm");
+
+ return cublasDgemm(handle, transA, transB,
+ m, n, k,
+ alpha,
+ A, ldA,
+ B, ldB,
+ beta,
+ C, ldC);
+ }
+
+ // cgemm
+ inline cublasStatus_t
+ gemm(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const ComplexFloat* alpha,
+ const ComplexFloat* A, int ldA,
+ const ComplexFloat* B, int ldB,
+ const ComplexFloat* beta,
+ ComplexFloat* C, int ldC)
+ {
+ BLAM_DEBUG_OUT("cublasCgemm");
+
+ return cublasCgemm(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const cuFloatComplex*>(alpha),
+ reinterpret_cast<const cuFloatComplex*>(A), ldA,
+ reinterpret_cast<const cuFloatComplex*>(B), ldB,
+ reinterpret_cast<const cuFloatComplex*>(beta),
+ reinterpret_cast<cuFloatComplex*>(C), ldC);
+ }
+
+ // zgemm
+ inline cublasStatus_t
+ gemm(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const ComplexDouble* alpha,
+ const ComplexDouble* A, int ldA,
+ const ComplexDouble* B, int ldB,
+ const ComplexDouble* beta,
+ ComplexDouble* C, int ldC)
+ {
+ BLAM_DEBUG_OUT("cublasZgemm");
+
+ return cublasZgemm(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const cuDoubleComplex*>(alpha),
+ reinterpret_cast<const cuDoubleComplex*>(A), ldA,
+ reinterpret_cast<const cuDoubleComplex*>(B), ldB,
+ reinterpret_cast<const cuDoubleComplex*>(beta),
+ reinterpret_cast<cuDoubleComplex*>(C), ldC);
+ }
+
+ // hgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const Half* alpha,
+ const Half* A, int ldA, int loA,
+ const Half* B, int ldB, int loB,
+ const Half* beta,
+ Half* C, int ldC, int loC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasHgemmStridedBatched");
+
+ return cublasHgemmStridedBatched(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const __half*>(alpha),
+ reinterpret_cast<const __half*>(A), ldA, loA,
+ reinterpret_cast<const __half*>(B), ldB, loB,
+ reinterpret_cast<const __half*>(beta),
+ reinterpret_cast<__half*>(C), ldC, loC,
+ batch_size);
+ }
+
+ // sgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const float* alpha,
+ const float* A, int ldA, int loA,
+ const float* B, int ldB, int loB,
+ const float* beta,
+ float* C, int ldC, int loC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasSgemmStridedBatched");
+
+ return cublasSgemmStridedBatched(handle, transA, transB,
+ m, n, k,
+ alpha,
+ A, ldA, loA,
+ B, ldB, loB,
+ beta,
+ C, ldC, loC,
+ batch_size);
+ }
+
+ // dgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const double* alpha,
+ const double* A, int ldA, int loA,
+ const double* B, int ldB, int loB,
+ const double* beta,
+ double* C, int ldC, int loC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasDgemmStridedBatched");
+
+ return cublasDgemmStridedBatched(handle, transA, transB,
+ m, n, k,
+ alpha,
+ A, ldA, loA,
+ B, ldB, loB,
+ beta,
+ C, ldC, loC,
+ batch_size);
+ }
+
+ // cgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const ComplexFloat* alpha,
+ const ComplexFloat* A, int ldA, int loA,
+ const ComplexFloat* B, int ldB, int loB,
+ const ComplexFloat* beta,
+ ComplexFloat* C, int ldC, int loC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasCgemmStridedBatched");
+
+ return cublasCgemmStridedBatched(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const cuFloatComplex*>(alpha),
+ reinterpret_cast<const cuFloatComplex*>(A), ldA, loA,
+ reinterpret_cast<const cuFloatComplex*>(B), ldB, loB,
+ reinterpret_cast<const cuFloatComplex*>(beta),
+ reinterpret_cast<cuFloatComplex*>(C), ldC, loC,
+ batch_size);
+ }
+
+ // zgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const ComplexDouble* alpha,
+ const ComplexDouble* A, int ldA, int loA,
+ const ComplexDouble* B, int ldB, int loB,
+ const ComplexDouble* beta,
+ ComplexDouble* C, int ldC, int loC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasZgemmStridedBatched");
+
+ return cublasZgemmStridedBatched(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const cuDoubleComplex*>(alpha),
+ reinterpret_cast<const cuDoubleComplex*>(A), ldA, loA,
+ reinterpret_cast<const cuDoubleComplex*>(B), ldB, loB,
+ reinterpret_cast<const cuDoubleComplex*>(beta),
+ reinterpret_cast<cuDoubleComplex*>(C), ldC, loC,
+ batch_size);
+ }
+
+ // hgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const Half* alpha,
+ const Half* const A[], int ldA,
+ const Half* const B[], int ldB,
+ const Half* beta,
+ Half* const C[], int ldC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasHgemmBatched");
+
+ return cublasHgemmBatched(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const __half*>(alpha),
+ reinterpret_cast<const __half**>(const_cast<const Half**>(A)), ldA,
+ // A, ldA, // cuBLAS 9.2
+ reinterpret_cast<const __half**>(const_cast<const Half**>(B)), ldB,
+ // B, ldB, // cuBLAS 9.2
+ reinterpret_cast<const __half*>(beta),
+ reinterpret_cast<__half**>(const_cast<Half**>(C)), ldC,
+ // C, ldC, // cuBLAS 9.2
+ batch_size);
+ }
+
+ // sgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const float* alpha,
+ const float* const A[], int ldA,
+ const float* const B[], int ldB,
+ const float* beta,
+ float* const C[], int ldC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasSgemmBatched");
+
+ return cublasSgemmBatched(handle, transA, transB,
+ m, n, k,
+ alpha,
+ const_cast<const float**>(A), ldA,
+ // A, ldA, // cuBLAS 9.2
+ const_cast<const float**>(B), ldB,
+ // B, ldB, // cuBLAS 9.2
+ beta,
+ const_cast<float**>(C), ldC,
+ // C, ldC, // cuBLAS 9.2
+ batch_size);
+ }
+
+ // dgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const double* alpha,
+ const double* const A[], int ldA,
+ const double* const B[], int ldB,
+ const double* beta,
+ double* const C[], int ldC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasDgemmBatched");
+
+ return cublasDgemmBatched(handle, transA, transB,
+ m, n, k,
+ alpha,
+ const_cast<const double**>(A), ldA,
+ // A, ldA, // cuBLAS 9.2
+ const_cast<const double**>(B), ldB,
+ // B, ldB, // cuBLAS 9.2
+ beta,
+ const_cast<double**>(C), ldC,
+ // C, ldC, // cuBLAS 9.2
+ batch_size);
+ }
+
+ // cgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const ComplexFloat* alpha,
+ const ComplexFloat* const A[], int ldA,
+ const ComplexFloat* const B[], int ldB,
+ const ComplexFloat* beta,
+ ComplexFloat* const C[], int ldC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasCgemmBatched");
+
+ return cublasCgemmBatched(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const cuFloatComplex*>(alpha),
+ const_cast<const cuFloatComplex**>(reinterpret_cast<const cuFloatComplex* const *>(A)), ldA,
+ //reinterpret_cast<const cuFloatComplex* const *>(A), ldA, // cuBLAS 9.2
+ const_cast<const cuFloatComplex**>(reinterpret_cast<const cuFloatComplex* const *>(B)), ldB,
+ //reinterpret_cast<const cuFloatComplex* const *>(B), ldB, // cuBLAS 9.2
+ reinterpret_cast<const cuFloatComplex*>(beta),
+ const_cast<cuFloatComplex**>(reinterpret_cast<cuFloatComplex* const *>(C)), ldC,
+ //reinterpret_cast<cuFloatComplex* const *>(C), ldC, // cuBLAS 9.2
+ batch_size);
+ }
+
+ // zgemm
+ inline cublasStatus_t
+ gemm_batch(cublasHandle_t handle,
+ cublasOperation_t transA, cublasOperation_t transB,
+ int m, int n, int k,
+ const ComplexDouble* alpha,
+ const ComplexDouble* const A[], int ldA,
+ const ComplexDouble* const B[], int ldB,
+ const ComplexDouble* beta,
+ ComplexDouble* const C[], int ldC,
+ int batch_size)
+ {
+ BLAM_DEBUG_OUT("cublasZgemmBatched");
+
+ return cublasZgemmBatched(handle, transA, transB,
+ m, n, k,
+ reinterpret_cast<const cuDoubleComplex*>(alpha),
+ const_cast<const cuDoubleComplex**>(reinterpret_cast<const cuDoubleComplex* const *>(A)), ldA,
+ //reinterpret_cast<const cuDoubleComplex* const *>(A), ldA, // cuBLAS 9.2
+ const_cast<const cuDoubleComplex**>(reinterpret_cast<const cuDoubleComplex* const *>(B)), ldB,
+ //reinterpret_cast<const cuDoubleComplex* const *>(B), ldB, // cuBLAS 9.2
+ reinterpret_cast<const cuDoubleComplex*>(beta),
+ const_cast<cuDoubleComplex**>(reinterpret_cast<cuDoubleComplex* const *>(C)), ldC,
+ //reinterpret_cast<cuDoubleComplex* const *>(C), ldC, // cuBLAS 9.2
+ batch_size);
+ }
+
+ } // end namespace cublas
+ } // end namespace blam
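For reference, a minimal host-side sketch of how the blam::cublas::gemm overloads defined in this header might be driven. The handle setup, the column-major 128x128x128 problem size, and the include path "cutlass/util/cublas_wrappers.hpp" are illustrative assumptions, not part of the file above; only the gemm/cublas_is_error/cublas_get_error names come from the header itself.

    #include <cstdio>
    #include <cuda_runtime.h>
    #include <cublas_v2.h>
    #include "cutlass/util/cublas_wrappers.hpp"  // assumed include path for this header

    int main() {
      int m = 128, n = 128, k = 128;            // illustrative problem size
      float alpha = 1.0f, beta = 0.0f;          // host-pointer mode is the cuBLAS default

      float *A, *B, *C;                         // column-major device buffers
      cudaMalloc(&A, sizeof(float) * m * k);
      cudaMalloc(&B, sizeof(float) * k * n);
      cudaMalloc(&C, sizeof(float) * m * n);

      cublasHandle_t handle;
      cublasCreate(&handle);

      // Overload resolution picks the float overload, which forwards to cublasSgemm.
      cublasStatus_t status = blam::cublas::gemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                                                 m, n, k,
                                                 &alpha, A, m, B, k,
                                                 &beta, C, m);
      if (blam::cublas::cublas_is_error(status)) {
        // cublas_get_error maps the status code to a readable message.
        std::printf("%s\n", blam::cublas::cublas_get_error(status));
      }

      cublasDestroy(handle);
      cudaFree(A); cudaFree(B); cudaFree(C);
      return 0;
    }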
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/cutlass/tools/util/include/cutlass/util/debug.h ADDED
@@ -0,0 +1,143 @@
+ /***************************************************************************************************
+ * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+
+ /*! \file
+ \brief Contains code for debugging cutlass code
+ */
+
+ #pragma once
+
+ #include "device_dump.h"
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ /******************************************************************************
+ * Debug and logging macros
+ ******************************************************************************/
+
+ /**
+ * Formats and prints the given message to stdout
+ */
+ #if !defined(CUDA_LOG)
+ #if !defined(__CUDA_ARCH__)
+ #define CUDA_LOG(format, ...) printf(format, __VA_ARGS__)
+ #else
+ #define CUDA_LOG(format, ...) \
+ printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \
+ blockIdx.x, \
+ blockIdx.y, \
+ blockIdx.z, \
+ threadIdx.x, \
+ threadIdx.y, \
+ threadIdx.z, \
+ __VA_ARGS__);
+ #endif
+ #endif
+
+ /**
+ * Formats and prints the given message to stdout only if DEBUG is defined
+ */
+ #if !defined(CUDA_LOG_DEBUG)
+ #ifdef DEBUG
+ #define CUDA_LOG_DEBUG(format, ...) CUDA_LOG(format, __VA_ARGS__)
+ #else
+ #define CUDA_LOG_DEBUG(format, ...)
+ #endif
+ #endif
+
+ /**
+ * \brief The corresponding error message is printed to \p stderr (or \p stdout in device code)
+ * along with the supplied source context.
+ *
+ * \return The CUDA error.
+ */
+ __host__ CUTLASS_DEVICE cudaError_t cuda_perror_impl(cudaError_t error,
+ const char* expression,
+ const char* filename,
+ int line) {
+ (void)filename;
+ (void)line;
+ if (error) {
+ #if !defined(__CUDA_ARCH__)
+ fprintf(
+ stderr, "CUDA error %d [%s, %d] in expression '%s': %s\n", error, filename, line, expression, cudaGetErrorString(error));
+ fflush(stderr);
+ #else
+ printf("CUDA error %d [%s, %d] in expression '%s'\n", error, filename, line, expression);
+ #endif
+ }
+ return error;
+ }
+
+ /**
+ * \brief Perror macro
+ */
+ #ifndef CUDA_PERROR
+ #define CUDA_PERROR(e) cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)
+ #endif
+
+ /**
+ * \brief Perror macro with exit
+ */
+ #ifndef CUDA_PERROR_EXIT
+ #define CUDA_PERROR_EXIT(e) \
+ do { if (cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)) { \
+ exit(1); \
+ } } while (0)
+ #endif
+
+ /**
+ * \brief Perror macro only if DEBUG is defined
+ */
+ #ifndef CUDA_PERROR_DEBUG
+ #ifdef DEBUG
+ #define CUDA_PERROR_DEBUG(e) CUDA_PERROR(e)
+ #else
+ #define CUDA_PERROR_DEBUG(e) (e)
+ #endif
+ #endif
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // A small helper class to dump a type at compile time
+ // Usage: DebugType<Class>::Class
+ template <typename T>
+ struct DebugType {};
+
+ template <typename T>
+ void DebugTypeFunc(T const& t) {
+ T::t;
+ }
+
+ // A small helper class to dump a compile time constant at compile time
+ // Usage: DebugValue<Class::kConstant>::kConstant
+ template <int Value>
+ struct DebugValue {};
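A minimal sketch of how the CUDA_PERROR_EXIT macro defined above is typically wrapped around CUDA runtime calls. The allocation size and the include path "cutlass/util/debug.h" are illustrative assumptions; compiling requires nvcc and the CUTLASS include directories so that CUTLASS_DEVICE and device_dump.h resolve.

    #include <cuda_runtime.h>
    #include "cutlass/util/debug.h"   // assumed include path for this header

    int main() {
      void* workspace = nullptr;
      // CUDA_PERROR_EXIT prints the failing expression with file/line context
      // (via cuda_perror_impl) and calls exit(1) on a non-zero cudaError_t.
      CUDA_PERROR_EXIT(cudaMalloc(&workspace, 1 << 20));   // illustrative 1 MiB buffer
      CUDA_PERROR_EXIT(cudaMemset(workspace, 0, 1 << 20));
      CUDA_PERROR_EXIT(cudaFree(workspace));
      return 0;
    }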