Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/checkmate_in_one.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/codenames.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/social_iqa.yaml +4 -0
- rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml +4 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/README.md +9 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/common.hpp +222 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_dl_fp16.cpp +37 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_dl_int4.cpp +44 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_wmma_fp16.cpp +73 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_xdl_fp16_fp8_v3.cpp +53 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_xdl_fp16_v2.cpp +51 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_xdl_int4.cpp +45 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/run_gemm_example_v2.inc +297 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_common.hpp +257 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_dl_common.hpp +196 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_dl_int8.cpp +40 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_xdl_fp64.cpp +79 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_xdl_int8.cpp +79 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/15_grouped_gemm/grouped_gemm_multiple_d_splitk_xdl_fp16.cpp +394 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/17_convnd_bwd_data/convnd_bwd_data_common.hpp +153 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/22_cgemm/cgemm_xdl_common.hpp +254 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/22_cgemm/cgemm_xdl_fp32.cpp +132 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/22_cgemm/cgemm_xdl_int4.cpp +140 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/22_cgemm/cgemm_xdl_int8.cpp +132 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/27_layernorm2d_fwd/common.hpp +22 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/27_layernorm2d_fwd/layernorm2d_fwd_fp16.cpp +44 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/27_layernorm2d_fwd/layernorm2d_fwd_splitk_fp16.cpp +45 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_fp32.cpp +135 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_int8.cpp +133 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/33_multiple_reduce/README.md +37 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/33_multiple_reduce/dual_reduce_common.hpp +314 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/33_multiple_reduce/dual_reduce_multiblock.cpp +98 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/33_multiple_reduce/dual_reduce_threadwise.cpp +93 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/52_im2col_col2im/common.hpp +97 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/52_im2col_col2im/image_to_column_f32.cpp +168 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/60_gemm_multi_ABD/gemm_multi_ABD_xdl_bias_fastgelu_bf16_i8.cpp +273 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/60_gemm_multi_ABD/gemm_multi_ABD_xdl_fastgelu_bf16_i8.cpp +273 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/60_gemm_multi_ABD/gemm_multi_ABD_xdl_fp16.cpp +363 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/60_gemm_multi_ABD/gemm_multi_ABD_xdl_multiply_bias_fastgelu_bf16_i8.cpp +274 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/data_type_enum.hpp +20 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/profile_batched_gemm_add_relu_gemm_add_impl.hpp +360 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/profile_batched_gemm_gemm_impl.hpp +319 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/profile_batched_gemm_impl.hpp +264 -0
- sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/profile_batched_gemm_reduce_impl.hpp +362 -0
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/checkmate_in_one.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: checkmate_in_one_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_checkmate_in_one_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/codenames.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: codenames_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_codenames_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conceptual_combinations_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_conceptual_combinations_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disambiguation_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_disambiguation_qa_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intersect_geometry_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_intersect_geometry_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: key_value_maps_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_key_value_maps_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_russian_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_misconceptions_russian_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: similarities_abstraction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_similarities_abstraction_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/social_iqa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_iqa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_social_iqa_generate_until
rag-evaluation-harness/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: temporal_sequences_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_temporal_sequences_generate_until
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/README.md
ADDED
@@ -0,0 +1,9 @@
+# Instructions for ```example_gemm_xdl```
+
+## Run ```example_gemm_xdl```
+```bash
+#arg1: verification (0=no, 1=yes)
+#arg2: initialization (0=no init, 1=integer value, 2=decimal value)
+#arg3: run kernel # of times (>1)
+./bin/example_gemm_xdl 0 1 5
+```
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/common.hpp
ADDED
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#pragma once
+
+#include <cstdlib>
+#include <iostream>
+#include <initializer_list>
+#include <numeric>
+
+#include "ck/ck.hpp"
+#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
+#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+#include "ck/utility/data_type.hpp"
+
+#include "ck/library/utility/check_err.hpp"
+#include "ck/library/utility/device_memory.hpp"
+#include "ck/library/utility/fill.hpp"
+#include "ck/library/utility/host_tensor.hpp"
+#include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
+
+struct ProblemSize final
+{
+    ck::index_t M = 3840;
+    ck::index_t N = 4096;
+    ck::index_t K = 4096;
+
+    ck::index_t StrideA = 4096;
+    ck::index_t StrideB = 4096;
+    ck::index_t StrideC = 4096;
+};
+
+struct ProblemSizeStreamK final
+{
+    ck::index_t M = 3840;
+    ck::index_t N = 4096;
+    ck::index_t K = 4096;
+
+    ck::index_t StrideA = 4096;
+    ck::index_t StrideB = 4096;
+    ck::index_t StrideC = 4096;
+
+    ck::index_t NumSKBlocks = -1;
+};
+
+struct ProblemSizeSplitK final
+{
+    ck::index_t M = 3840;
+    ck::index_t N = 4096;
+    ck::index_t K = 4096;
+
+    ck::index_t StrideA = 4096;
+    ck::index_t StrideB = 4096;
+    ck::index_t StrideC = 4096;
+
+    ck::index_t KBatch = 1;
+};
+
+struct ExecutionConfig final
+{
+    bool do_verification = true;
+    int init_method      = 2;
+    bool time_kernel     = false;
+};
+
+template <ck::index_t... Is>
+using S = ck::Sequence<Is...>;
+
+using Row = ck::tensor_layout::gemm::RowMajor;
+using Col = ck::tensor_layout::gemm::ColumnMajor;
+
+using PassThrough = ck::tensor_operation::element_wise::PassThrough;
+
+template <typename ProblemType>
+bool parse_cmd_args(int, char*[], ProblemType&, ExecutionConfig&)
+{
+    return false;
+}
+
+template <>
+bool parse_cmd_args<ProblemSize>(int argc,
+                                 char* argv[],
+                                 ProblemSize& problem_size,
+                                 ExecutionConfig& config)
+{
+    if(argc == 1)
+    {
+        // use default case
+    }
+    else if(argc == 4)
+    {
+        config.do_verification = std::stoi(argv[1]);
+        config.init_method     = std::stoi(argv[2]);
+        config.time_kernel     = std::stoi(argv[3]);
+    }
+    else if(argc == 10)
+    {
+        config.do_verification = std::stoi(argv[1]);
+        config.init_method     = std::stoi(argv[2]);
+        config.time_kernel     = std::stoi(argv[3]);
+
+        problem_size.M = std::stoi(argv[4]);
+        problem_size.N = std::stoi(argv[5]);
+        problem_size.K = std::stoi(argv[6]);
+
+        problem_size.StrideA = std::stoi(argv[7]);
+        problem_size.StrideB = std::stoi(argv[8]);
+        problem_size.StrideC = std::stoi(argv[9]);
+    }
+    else
+    {
+        std::cerr << "arg1: verification (0=no, 1=yes)" << std::endl
+                  << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)"
+                  << std::endl
+                  << "arg3: time kernel (0=no, 1=yes)" << std::endl
+                  << "arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC" << std::endl;
+        return false;
+    }
+
+    return true;
+}
+
+template <>
+bool parse_cmd_args<ProblemSizeStreamK>(int argc,
+                                        char* argv[],
+                                        ProblemSizeStreamK& problem_size,
+                                        ExecutionConfig& config)
+{
+    if(argc == 1)
+    {
+        // use default case
+    }
+    else if(argc == 4)
+    {
+        config.do_verification = std::stoi(argv[1]);
+        config.init_method     = std::stoi(argv[2]);
+        config.time_kernel     = std::stoi(argv[3]);
+    }
+    else if(argc >= 10)
+    {
+        config.do_verification = std::stoi(argv[1]);
+        config.init_method     = std::stoi(argv[2]);
+        config.time_kernel     = std::stoi(argv[3]);
+
+        problem_size.M = std::stoi(argv[4]);
+        problem_size.N = std::stoi(argv[5]);
+        problem_size.K = std::stoi(argv[6]);
+
+        problem_size.StrideA = std::stoi(argv[7]);
+        problem_size.StrideB = std::stoi(argv[8]);
+        problem_size.StrideC = std::stoi(argv[9]);
+
+        if(argc >= 11)
+        {
+            problem_size.NumSKBlocks = std::stoi(argv[10]);
+        }
+    }
+    else
+    {
+        std::cerr << "arg1: verification (0=no, 1=yes)" << std::endl
+                  << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)"
+                  << std::endl
+                  << "arg3: time kernel (0=no, 1=yes)" << std::endl
+                  << "arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC" << std::endl
+                  << "arg10: NumSKBlocks(optional)" << std::endl;
+        return false;
+    }
+
+    return true;
+}
+
+template <>
+bool parse_cmd_args<ProblemSizeSplitK>(int argc,
+                                       char* argv[],
+                                       ProblemSizeSplitK& problem_size,
+                                       ExecutionConfig& config)
+{
+    if(argc == 1)
+    {
+        // use default case
+    }
+    else if(argc == 4)
+    {
+        config.do_verification = std::stoi(argv[1]);
+        config.init_method     = std::stoi(argv[2]);
+        config.time_kernel     = std::stoi(argv[3]);
+    }
+    else if(argc >= 10)
+    {
+        config.do_verification = std::stoi(argv[1]);
+        config.init_method     = std::stoi(argv[2]);
+        config.time_kernel     = std::stoi(argv[3]);
+
+        problem_size.M = std::stoi(argv[4]);
+        problem_size.N = std::stoi(argv[5]);
+        problem_size.K = std::stoi(argv[6]);
+
+        problem_size.StrideA = std::stoi(argv[7]);
+        problem_size.StrideB = std::stoi(argv[8]);
+        problem_size.StrideC = std::stoi(argv[9]);
+
+        if(argc >= 11)
+        {
+            problem_size.KBatch = std::stoi(argv[10]);
+        }
+    }
+    else
+    {
+        std::cerr << "arg1: verification (0=no, 1=yes)" << std::endl
+                  << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)"
+                  << std::endl
+                  << "arg3: time kernel (0=no, 1=yes)" << std::endl
+                  << "arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC" << std::endl
+                  << "arg10: KBatch" << std::endl;
+        return false;
+    }
+
+    return true;
+}
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_dl_fp16.cpp
ADDED
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#include "common.hpp"
+
+#include "ck/tensor_operation/gpu/device/impl/device_gemm_dl.hpp"
+
+using ADataType   = ck::half_t;
+using BDataType   = ck::half_t;
+using CDataType   = ck::half_t;
+using AccDataType = float;
+
+using ALayout = Col;
+using BLayout = Row;
+using CLayout = Row;
+
+using AElementOp = PassThrough;
+using BElementOp = PassThrough;
+using CElementOp = PassThrough;
+
+static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
+
+// clang-format off
+using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmDl
+// ######| AData| BData| CData| AccData| ALayout| BLayout| CLayout| A| B| C| GEMM| Block| MPer| NPer| K0Per| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer|
+// ######| Type| Type| Type| Type| | | | Elementwise| Elementwise| Elementwise| Spacialization| Size| Block| Block| Block| | ThreadM111| ThreadN111| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector|
+// ######| | | | | | | | Operation| Operation| Operation| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | |
+// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+    < ADataType, BDataType, CDataType, AccDataType, ALayout, BLayout, CLayout, AElementOp, BElementOp, CElementOp, GemmDefault, 256, 128, 128, 16, 2, 4, 4, 1, S<8, 2>, S<8, 2>, S<2, 1, 4, 2>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 4, 1>, S<0, 3, 1, 2>, S<1, 1, 4, 2>, S<2, 1, 4, 2>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 4, 1>, S<0, 3, 1, 2>, S<1, 1, 4, 2>, S<0, 1, 2, 3, 4, 5>, 5, 4>;
+// clang-format on
+
+using ReferenceGemmInstance = ck::tensor_operation::host::
+    ReferenceGemm<ADataType, BDataType, CDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
+
+#include "run_gemm_example.inc"
+
+int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_dl_int4.cpp
ADDED
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
+
+#include "common.hpp"
+
+#include "ck/tensor_operation/gpu/device/impl/device_gemm_dl.hpp"
+
+using ADataType       = ck::int4_t;
+using BDataType       = ck::int4_t;
+using CDataType       = ck::int4_t;
+using KernelADataType = int8_t;
+using KernelBDataType = int8_t;
+using KernelCDataType = int8_t;
+using AccDataType     = int32_t;
+
+using ALayout = Col;
+using BLayout = Row;
+using CLayout = Row;
+
+using AElementOp = PassThrough;
+using BElementOp = PassThrough;
+using CElementOp = PassThrough;
+
+static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
+
+// clang-format off
+using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmDl
+// ######| AData| BData| CData| AccData| ALayout| BLayout| CLayout| A| B| C| GEMM| Block| MPer| NPer| K0Per| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer|
+// ######| Type| Type| Type| Type| | | | Elementwise| Elementwise| Elementwise| Spacialization| Size| Block| Block| Block| | ThreadM111| ThreadN111| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector|
+// ######| | | | | | | | Operation| Operation| Operation| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | |
+// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+    < KernelADataType, KernelBDataType, KernelCDataType, AccDataType, ALayout, BLayout, CLayout, AElementOp, BElementOp, CElementOp, GemmDefault, 256, 128, 128, 16, 4, 4, 4, 1, S<8, 2>, S<8, 2>, S<2, 1, 4, 4>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 4, 1>, S<0, 3, 1, 2>, S<1, 1, 4, 4>, S<2, 1, 4, 4>, S<8, 1, 32, 1>, S<0, 3, 1, 2>, S<0, 3, 1, 2>, S<1, 1, 4, 1>, S<0, 3, 1, 2>, S<1, 1, 4, 4>, S<0, 1, 2, 3, 4, 5>, 5, 4>;
+// clang-format on
+
+using ReferenceGemmInstance = ck::tensor_operation::host::
+    ReferenceGemm<ADataType, BDataType, CDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
+
+#define BUILD_INT4_EXAMPLE
+#include "run_gemm_example.inc"
+
+int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
+#endif
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_wmma_fp16.cpp
ADDED
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#include "common.hpp"
+
+#include "ck/tensor_operation/gpu/device/impl/device_gemm_wmma.hpp"
+
+using ADataType        = ck::half_t;
+using BDataType        = ck::half_t;
+using AccDataType      = float;
+using CShuffleDataType = float;
+using CDataType        = ck::half_t;
+
+using ALayout = Row;
+using BLayout = Col;
+using CLayout = Row;
+
+using AElementOp = PassThrough;
+using BElementOp = PassThrough;
+using CElementOp = PassThrough;
+
+static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
+
+// clang-format off
+using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemmWmma_CShuffle
+    < ALayout,
+      BLayout,
+      CLayout,
+      ADataType,
+      BDataType,
+      CDataType,
+      AccDataType,
+      CShuffleDataType,
+      AElementOp,
+      BElementOp,
+      CElementOp,
+      GemmDefault,
+      1,           // Prefetch stage
+      128,         // BlockSize
+      64,          // MPerBlock
+      128,         // NPerBlock
+      64,          // KPerBlock
+      8,           // K1
+      16,          // MPerWmma
+      16,          // NPerWmma
+      2,           // M-Repeat // M-PerWmma / M-Repeat = M-Wave
+      4,           // N-Repeat // N-PerWmma / N-Repeat = N-Wave
+      S<4, 32, 1>,
+      S<1, 0, 2>,
+      S<1, 0, 2>,
+      2,
+      8,
+      8,
+      true,
+      S<4, 32, 1>,
+      S<1, 0, 2>,
+      S<1, 0, 2>,
+      2,
+      8,
+      8,
+      true,
+      1,           // C shuffle (M Repeat) Per store
+      1,           // C shuffle (N Repeat) Per store
+      S<1, 32, 1, 4>,
+      8>;
+// clang-format on
+
+using ReferenceGemmInstance = ck::tensor_operation::host::
+    ReferenceGemm<ADataType, BDataType, CDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
+
+#include "run_gemm_example.inc"
+
+int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_xdl_fp16_fp8_v3.cpp
ADDED
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#include "common.hpp"
+
+#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v3.hpp"
+
+using ADataType        = ck::f8_t;
+using BDataType        = ck::half_t;
+using AccDataType      = float;
+using CShuffleDataType = ck::half_t;
+using CDataType        = ck::half_t;
+
+using ALayout = Row;
+using BLayout = Col;
+using CLayout = Row;
+
+using AElementOp = PassThrough;
+using BElementOp = PassThrough;
+using CElementOp = PassThrough;
+
+static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
+
+// clang-format off
+using DeviceGemmV2Instance =
+    ck::tensor_operation::device::DeviceGemm_Xdl_CShuffleV3<
+        ALayout, BLayout, CLayout,
+        ADataType, BDataType, CDataType, AccDataType, CShuffleDataType,
+        AElementOp, BElementOp, CElementOp, GemmDefault,
+        64,
+        16, 16,
+        64, 16, 8,
+        16, 16,
+        1, 1,
+        S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>,
+        2, 16, 16, 0,
+        S<8, 8, 1>, S<1, 0, 2>, S<1, 0, 2>,
+        2, 8, 8, 0,
+        1, 1, S<1, 16, 1, 4>, 4,
+        ck::BlockGemmPipelineScheduler::Intrawave,ck::BlockGemmPipelineVersion::v1>;
+// clang-format on
+
+using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
+                                                                        BDataType,
+                                                                        CDataType,
+                                                                        AccDataType,
+                                                                        PassThrough,
+                                                                        PassThrough,
+                                                                        PassThrough>;
+
+#include "run_gemm_example_v2.inc"
+
+int main(int argc, char* argv[]) { return !run_gemm_splitk_example(argc, argv); }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_xdl_fp16_v2.cpp
ADDED
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#include "common.hpp"
+
+#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v2.hpp"
+
+using ADataType        = ck::half_t;
+using BDataType        = ck::half_t;
+using AccDataType      = float;
+using CShuffleDataType = ck::half_t;
+using CDataType        = ck::half_t;
+
+using F16 = ck::half_t;
+using F32 = float;
+
+using ALayout = Row;
+using BLayout = Row;
+using CLayout = Row;
+
+using AElementOp = PassThrough;
+using BElementOp = PassThrough;
+using CElementOp = PassThrough;
+
+static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
+
+// clang-format off
+using DeviceGemmInstance =
+    ck::tensor_operation::device::DeviceGemm_Xdl_CShuffleV2<
+        ALayout, BLayout, CLayout,
+        F16, F16, F16, F32, F16,
+        PassThrough, PassThrough, PassThrough, GemmDefault,
+        2, 256,
+        256, 256,
+        32, 8, 4,
+        32, 32,
+        4, 4,
+        S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>,
+        2, 8, 8, 0,
+        S<8, 32, 1>, S<0, 2, 1>, S<0, 2, 1>,
+        1, 8, 4, 0,
+        1, 1, S<1, 32, 1, 8>, 8,
+        ck::LoopScheduler::Default, ck::PipelineVersion::v1>;
+// clang-format on
+
+using ReferenceGemmInstance = ck::tensor_operation::host::
+    ReferenceGemm<ADataType, BDataType, CDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
+
+#include "run_gemm_example.inc"
+
+int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/gemm_xdl_int4.cpp
ADDED
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
+
+#include "common.hpp"
+
+#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle.hpp"
+
+using ADataType        = ck::int4_t;
+using BDataType        = ck::int4_t;
+using CDataType        = ck::int4_t;
+using KernelADataType  = int8_t;
+using KernelBDataType  = int8_t;
+using KernelCDataType  = int8_t;
+using AccDataType      = int32_t;
+using CShuffleDataType = int8_t;
+
+using ALayout = Row;
+using BLayout = Col;
+using CLayout = Row;
+
+using AElementOp = PassThrough;
+using BElementOp = PassThrough;
+using CElementOp = PassThrough;
+
+static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
+
+// clang-format off
+using DeviceGemmInstance = ck::tensor_operation::device::DeviceGemm_Xdl_CShuffle
+// ######| ALayout| BLayout| CLayout| AData| BData| CData| AccData| CShuffle| A| B| C| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
+// ######| | | | Type| Type| Type| Type| DataType| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
+// ######| | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
+// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+    < ALayout, BLayout, CLayout, KernelADataType, KernelBDataType, KernelCDataType, AccDataType, CShuffleDataType, AElementOp, BElementOp, CElementOp, GemmDefault, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 1, 1, 1, S<1, 64, 1, 4>, 16>;
+// clang-format on
+
+using ReferenceGemmInstance = ck::tensor_operation::host::
+    ReferenceGemm<ADataType, BDataType, CDataType, AccDataType, AElementOp, BElementOp, CElementOp>;
+
+#define BUILD_INT4_EXAMPLE
+#include "run_gemm_example.inc"
+
+int main(int argc, char* argv[]) { return !run_gemm_example(argc, argv); }
+#endif
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/01_gemm/run_gemm_example_v2.inc
ADDED
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#pragma once
+
+template <typename DataType>
+inline __host__ __device__ constexpr double get_rtol()
+{
+    if constexpr(std::is_same_v<DataType, float>)
+    {
+        return 1e-3;
+    }
+    else if constexpr(std::is_same_v<DataType, double>)
+    {
+        return 1e-6;
+    }
+    else if constexpr(std::is_same_v<DataType, ck::half_t>)
+    {
+        return 1e-3;
+    }
+    else if constexpr(std::is_same_v<DataType, ck::bhalf_t>)
+    {
+        return 5e-2;
+    }
+    else if constexpr(std::is_same_v<DataType, int32_t>)
+    {
+        return 1e-1;
+    }
+    else if constexpr(std::is_same_v<DataType, int8_t>)
+    {
+        return 1e-1;
+    }
+    else if constexpr(std::is_same_v<DataType, ck::f8_t>)
+    {
+        return 1e-1; // 240 and 224 are acceptable
+    }
+    else if constexpr(std::is_same_v<DataType, ck::bf8_t>)
+    {
+        return 1.5e-1; // 57344 and 49152 are acceptable
+    }
+    else
+    {
+        return 1e-3;
+    }
+}
+
+template <typename DataType>
+inline __host__ __device__ constexpr double get_atol()
+{
+    if constexpr(std::is_same_v<DataType, float>)
+    {
+        return 1e-3;
+    }
+    else if constexpr(std::is_same_v<DataType, double>)
+    {
+        return 1e-6;
+    }
+    else if constexpr(std::is_same_v<DataType, ck::half_t>)
+    {
+        return 1e-3;
+    }
+    else if constexpr(std::is_same_v<DataType, ck::bhalf_t>)
+    {
+        return 5e-2;
+    }
+    else if constexpr(std::is_same_v<DataType, int32_t>)
+    {
+        return 1e-1;
+    }
+    else if constexpr(std::is_same_v<DataType, int8_t>)
+    {
+        return 1e-1;
+    }
+    else if constexpr(std::is_same_v<DataType, ck::f8_t>)
+    {
+        return 16.1; // 240 and 224 are acceptable
+    }
+    else if constexpr(std::is_same_v<DataType, ck::bf8_t>)
+    {
+        return 8192.1; // 57344 and 49152 are acceptable
+    }
+    else
+    {
+        return 1e-3;
+    }
+}
+
+template <typename ProblemType>
+bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
+{
+#if defined(BUILD_INT4_EXAMPLE) && defined(CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4)
+    static_assert(sizeof(ck::int4_t) == sizeof(int8_t));
+#endif
+
+    using namespace ck::literals;
+
+    auto M       = problem_size.M;
+    auto N       = problem_size.N;
+    auto K       = problem_size.K;
+    auto StrideA = problem_size.StrideA;
+    auto StrideB = problem_size.StrideB;
+    auto StrideC = problem_size.StrideC;
+    auto KBatch  = problem_size.KBatch;
+
+    auto f_host_tensor_descriptor =
+        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
+            if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
+            {
+                return HostTensorDescriptor({row, col}, {stride, 1_uz});
+            }
+            else
+            {
+                return HostTensorDescriptor({row, col}, {1_uz, stride});
+            }
+        };
+
+    auto f_get_default_stride =
+        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
+            if(stride == 0)
+            {
+                // give a chance if stride is zero, return a default packed stride
+                if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
+                {
+                    return col;
+                }
+                else
+                {
+                    return row;
+                }
+            }
+            else
+                return stride;
+        };
+
+    StrideA = f_get_default_stride(M, K, StrideA, ALayout{});
+    StrideB = f_get_default_stride(K, N, StrideB, BLayout{});
+    StrideC = f_get_default_stride(M, N, StrideC, CLayout{});
+
+    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
+    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
+
+    switch(config.init_method)
+    {
+    case 0:
+        a_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
+        b_k_n.GenerateTensorValue(GeneratorTensor_1<BDataType>{1});
+        break;
+    case 1:
+        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
+        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
+        break;
+    case 2:
+        a_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
+        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
+        break;
+    case 3:
+        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
+        b_k_n.GenerateTensorValue(GeneratorTensor_1<BDataType>{1});
+        break;
+    default:
+        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
+        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
+    }
+#if 0
+    printf("B matrix:\n");
+    for (int in = 0; in < N; in++)
+    {
+        for (int ik = 0; ik < K; ik++)
+        {
+            printf("%02x ", *(reinterpret_cast<uint8_t*>(&b_k_n(ik,in))));
+            if(ik%8==7) printf("|");
+        }
+        printf("\n");
+    }
+#endif
+
+    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
+    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
+
+    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
+    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
+    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
+
+#ifdef BUILD_INT4_EXAMPLE
+    DeviceMem a_m_k_device_buf(sizeof(KernelADataType) * a_m_k.mDesc.GetElementSpaceSize());
+    DeviceMem b_k_n_device_buf(sizeof(KernelBDataType) * b_k_n.mDesc.GetElementSpaceSize());
+    DeviceMem c_m_n_device_buf(sizeof(KernelCDataType) *
+                               c_m_n_device_result.mDesc.GetElementSpaceSize());
+
+    const Tensor<KernelADataType> a_m_k_converted(a_m_k);
+    const Tensor<KernelBDataType> b_k_n_converted(b_k_n);
+
+    a_m_k_device_buf.ToDevice(a_m_k_converted.mData.data());
+    b_k_n_device_buf.ToDevice(b_k_n_converted.mData.data());
+#else
+    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
+    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
+    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());
+
+    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
+    b_k_n_device_buf.ToDevice(b_k_n.mData.data());
+#endif
+    DeviceMem workspace;
+
+    auto a_element_op = AElementOp{};
+    auto b_element_op = BElementOp{};
+    auto c_element_op = CElementOp{};
+
+    // do GEMM
+    auto gemm    = DeviceGemmV2Instance{};
+    auto invoker = gemm.MakeInvoker();
+    float ave_time = 0;
+
+    auto argument = gemm.MakeArgument(
+#ifdef BUILD_INT4_EXAMPLE
+        static_cast<KernelADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
+        static_cast<KernelBDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
+        static_cast<KernelCDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
+#else
+        static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
+        static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
+        static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
+#endif
+        M,
+        N,
+        K,
+        StrideA,
+        StrideB,
+        StrideC,
+        KBatch,
+        a_element_op,
+        b_element_op,
+        c_element_op);
+
+    if(!gemm.IsSupportedArgument(argument))
+    {
+        std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;
+
+        return true;
+    }
+
+    bool pass = true;
+    if(config.do_verification)
+    {
+        auto ref_gemm    = ReferenceGemmInstance{};
+        auto ref_invoker = ref_gemm.MakeInvoker();
+
+        auto ref_argument = ref_gemm.MakeArgument(
+            a_m_k, b_k_n, c_m_n_host_result, PassThrough{}, PassThrough{}, PassThrough{});
+
+        ref_invoker.Run(ref_argument);
+
+        ave_time = invoker.Run(argument, StreamConfig{nullptr, false, 1});
+#ifdef BUILD_INT4_EXAMPLE
+        Tensor<CDataType> c_m_n_device_result_converted(c_m_n_host_result.mDesc);
+
+        c_m_n_device_buf.FromDevice(c_m_n_device_result_converted.mData.data());
+
+        c_m_n_device_result = c_m_n_device_result_converted.CopyAsType<CDataType>();
+
+        return ck::utils::check_err(c_m_n_device_result_converted, c_m_n_host_result);
+#else
+        c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());
+
+        pass &= ck::utils::check_err(c_m_n_device_result,
+                                     c_m_n_host_result,
+                                     "Error: Incorrect results!",
+                                     get_rtol<CDataType>(),
+                                     get_atol<CDataType>());
+#endif
+    }
+
+    if(config.time_kernel)
+    {
+        ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
+
+        std::size_t flop = 2_uz * M * N * K;
+        std::size_t num_btype =
+            sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N;
+
+        float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
+
+        float gb_per_sec = num_btype / 1.E6 / ave_time;
+
+        std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
+                  << " GB/s, " << gemm.GetTypeString() << std::endl;
+    }
+    return pass;
+}
+
+bool run_gemm_splitk_example(int argc, char* argv[])
+{
+    ProblemSizeSplitK problem_size;
+    ExecutionConfig config;
+
+    return !parse_cmd_args(argc, argv, problem_size, config) || run_gemm(problem_size, config);
+}
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_common.hpp
ADDED
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// SPDX-License-Identifier: MIT
|
2 |
+
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
3 |
+
|
4 |
+
#include <cstdlib>
|
5 |
+
#include <iostream>
|
6 |
+
#include <numeric>
|
7 |
+
#include <type_traits>
|
8 |
+
|
9 |
+
#include "ck/ck.hpp"
|
10 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
12 |
+
|
13 |
+
#include "ck/library/utility/algorithm.hpp"
|
14 |
+
#include "ck/library/utility/check_err.hpp"
|
15 |
+
#include "ck/library/utility/device_memory.hpp"
|
16 |
+
#include "ck/library/utility/host_tensor.hpp"
|
17 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
18 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
19 |
+
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
|
20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp"
|
21 |
+
|
22 |
+
void print_helper_msg()
|
23 |
+
{
|
24 |
+
std::cout << "arg1: verification (0=no, 1=yes)\n"
|
25 |
+
<< "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
|
26 |
+
<< "arg3: time kernel (0=no, 1=yes)\n"
|
27 |
+
<< ck::utils::conv::get_conv_param_parser_helper_msg() << std::endl;
|
28 |
+
}
|
29 |
+
|
30 |
+
template <typename DataType>
|
31 |
+
inline __host__ __device__ constexpr double get_rtol()
|
32 |
+
{
|
33 |
+
if constexpr(std::is_same_v<DataType, float>)
|
34 |
+
{
|
35 |
+
return 1e-3;
|
36 |
+
}
|
37 |
+
else if constexpr(std::is_same_v<DataType, double>)
|
38 |
+
{
|
39 |
+
return 1e-6;
|
40 |
+
}
|
41 |
+
else if constexpr(std::is_same_v<DataType, ck::half_t>)
|
42 |
+
{
|
43 |
+
return 1e-3;
|
44 |
+
}
|
45 |
+
else if constexpr(std::is_same_v<DataType, ck::bhalf_t>)
|
46 |
+
{
|
47 |
+
return 5e-2;
|
48 |
+
}
|
49 |
+
else if constexpr(std::is_same_v<DataType, int32_t>)
|
50 |
+
{
|
51 |
+
return 1e-1;
|
52 |
+
}
|
53 |
+
else if constexpr(std::is_same_v<DataType, int8_t>)
|
54 |
+
{
|
55 |
+
return 1e-1;
|
56 |
+
}
|
57 |
+
else if constexpr(std::is_same_v<DataType, ck::f8_t>)
|
58 |
+
{
|
59 |
+
return 1e-1; // 240 and 224 are acceptable
|
60 |
+
}
|
61 |
+
else if constexpr(std::is_same_v<DataType, ck::bf8_t>)
|
62 |
+
{
|
63 |
+
return 1.5e-1; // 57344 and 49152 are acceptable
|
64 |
+
}
|
65 |
+
else
|
66 |
+
{
|
67 |
+
return 1e-3;
|
68 |
+
}
|
69 |
+
}
|
70 |
+
|
71 |
+
template <typename DataType>
|
72 |
+
inline __host__ __device__ constexpr double get_atol()
|
73 |
+
{
|
74 |
+
if constexpr(std::is_same_v<DataType, float>)
|
75 |
+
{
|
76 |
+
return 1e-3;
|
77 |
+
}
|
78 |
+
else if constexpr(std::is_same_v<DataType, double>)
|
79 |
+
{
|
80 |
+
return 1e-6;
|
81 |
+
}
|
82 |
+
else if constexpr(std::is_same_v<DataType, ck::half_t>)
|
83 |
+
{
|
84 |
+
return 1e-3;
|
85 |
+
}
|
86 |
+
else if constexpr(std::is_same_v<DataType, ck::bhalf_t>)
|
87 |
+
{
|
88 |
+
return 5e-2;
|
89 |
+
}
|
90 |
+
else if constexpr(std::is_same_v<DataType, int32_t>)
|
91 |
+
{
|
92 |
+
return 1e-1;
|
93 |
+
}
|
94 |
+
else if constexpr(std::is_same_v<DataType, int8_t>)
|
95 |
+
{
|
96 |
+
return 1e-1;
|
97 |
+
}
|
98 |
+
else if constexpr(std::is_same_v<DataType, ck::f8_t>)
|
99 |
+
{
|
100 |
+
return 16.1; // 240 and 224 are acceptable
|
101 |
+
}
|
102 |
+
else if constexpr(std::is_same_v<DataType, ck::bf8_t>)
|
103 |
+
{
|
104 |
+
return 8192.1; // 57344 and 49152 are acceptable
|
105 |
+
}
|
106 |
+
else
|
107 |
+
{
|
108 |
+
return 1e-3;
|
109 |
+
}
|
110 |
+
}
|
111 |
+
|
112 |
+
template <ck::index_t NDimSpatial,
|
113 |
+
typename InDataType,
|
114 |
+
typename WeiDataType,
|
115 |
+
typename OutDataType,
|
116 |
+
typename InElementOp,
|
117 |
+
typename WeiElementOp,
|
118 |
+
typename OutElementOp,
|
119 |
+
typename DeviceConvNDFwdInstance>
|
120 |
+
bool run_grouped_conv_fwd(bool do_verification,
|
121 |
+
int init_method,
|
122 |
+
bool time_kernel,
|
123 |
+
const ck::utils::conv::ConvParam& conv_param,
|
124 |
+
const HostTensorDescriptor& in_g_n_c_wis_desc,
|
125 |
+
const HostTensorDescriptor& wei_g_k_c_xs_desc,
|
126 |
+
const HostTensorDescriptor& out_g_n_k_wos_desc,
|
127 |
+
const InElementOp& in_element_op,
|
128 |
+
const WeiElementOp& wei_element_op,
|
129 |
+
const OutElementOp& out_element_op)
|
130 |
+
{
|
131 |
+
Tensor<InDataType> in(in_g_n_c_wis_desc);
|
132 |
+
Tensor<WeiDataType> wei(wei_g_k_c_xs_desc);
|
133 |
+
Tensor<OutDataType> out_host(out_g_n_k_wos_desc);
|
134 |
+
Tensor<OutDataType> out_device(out_g_n_k_wos_desc);
|
135 |
+
|
136 |
+
std::cout << "in: " << in.mDesc << std::endl;
|
137 |
+
std::cout << "wei: " << wei.mDesc << std::endl;
|
138 |
+
std::cout << "out: " << out_host.mDesc << std::endl;
|
139 |
+
|
140 |
+
switch(init_method)
|
141 |
+
{
|
142 |
+
case 0: break;
|
143 |
+
case 1:
|
144 |
+
        in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
        wei.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
        break;
    default:
        in.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
        wei.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpaceSize());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * wei.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * out_device.mDesc.GetElementSpaceSize());

    in_device_buf.ToDevice(in.mData.data());
    wei_device_buf.ToDevice(wei.mData.data());

    std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_strides{};
    std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_strides{};
    std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_strides{};
    std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
    std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
    std::array<ck::index_t, NDimSpatial> input_left_pads{};
    std::array<ck::index_t, NDimSpatial> input_right_pads{};

    auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

    copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
    copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
    copy(wei_g_k_c_xs_desc.GetLengths(), b_g_k_c_xs_lengths);
    copy(wei_g_k_c_xs_desc.GetStrides(), b_g_k_c_xs_strides);
    copy(out_g_n_k_wos_desc.GetLengths(), e_g_n_k_wos_lengths);
    copy(out_g_n_k_wos_desc.GetStrides(), e_g_n_k_wos_strides);
    copy(conv_param.conv_filter_strides_, conv_filter_strides);
    copy(conv_param.conv_filter_dilations_, conv_filter_dilations);
    copy(conv_param.input_left_pads_, input_left_pads);
    copy(conv_param.input_right_pads_, input_right_pads);

    // do Conv
    auto conv     = DeviceConvNDFwdInstance{};
    auto invoker  = conv.MakeInvoker();
    auto argument = conv.MakeArgument(in_device_buf.GetDeviceBuffer(),
                                      wei_device_buf.GetDeviceBuffer(),
                                      std::array<const void*, 0>{},
                                      out_device_buf.GetDeviceBuffer(),
                                      a_g_n_c_wis_lengths,
                                      a_g_n_c_wis_strides,
                                      b_g_k_c_xs_lengths,
                                      b_g_k_c_xs_strides,
                                      std::array<std::array<ck::index_t, NDimSpatial + 3>, 0>{{}},
                                      std::array<std::array<ck::index_t, NDimSpatial + 3>, 0>{{}},
                                      e_g_n_k_wos_lengths,
                                      e_g_n_k_wos_strides,
                                      conv_filter_strides,
                                      conv_filter_dilations,
                                      input_left_pads,
                                      input_right_pads,
                                      in_element_op,
                                      wei_element_op,
                                      out_element_op);

    if(!conv.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_conv with the specified compilation parameters does "
            "not support this Conv problem");
    }

    float avg_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop      = conv_param.GetFlops();
    std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();

    float tflops     = static_cast<float>(flop) / 1.E9 / avg_time;
    float gb_per_sec = num_btype / 1.E6 / avg_time;
    std::cout << "Perf: " << avg_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, "
              << conv.GetTypeString() << std::endl;

    if(do_verification)
    {
        auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<NDimSpatial,
                                                                     InDataType,
                                                                     WeiDataType,
                                                                     OutDataType,
                                                                     InElementOp,
                                                                     WeiElementOp,
                                                                     OutElementOp>();

        auto ref_invoker  = ref_conv.MakeInvoker();
        auto ref_argument = ref_conv.MakeArgument(in,
                                                  wei,
                                                  out_host,
                                                  conv_param.conv_filter_strides_,
                                                  conv_param.conv_filter_dilations_,
                                                  conv_param.input_left_pads_,
                                                  conv_param.input_right_pads_,
                                                  in_element_op,
                                                  wei_element_op,
                                                  out_element_op);

        ref_invoker.Run(ref_argument);

        out_device_buf.FromDevice(out_device.mData.data());

        return ck::utils::check_err(out_device,
                                    out_host,
                                    "Error: incorrect results!",
                                    get_rtol<OutDataType>(),
                                    get_atol<OutDataType>());
    }

    return true;
}
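A note on the perf arithmetic used throughout these examples: avg_time is reported in milliseconds, so flop / 1.E9 / avg_time is GFLOP per millisecond, i.e. TFLOP/s, and num_btype / 1.E6 / avg_time is MB per millisecond, i.e. GB/s. A minimal standalone sketch with hypothetical numbers (not taken from the example):

// Standalone sketch of the perf math above.
// flop / 1e9 = GFLOP; GFLOP / ms = TFLOP/s. bytes / 1e6 = MB; MB / ms = GB/s.
#include <cstddef>
#include <iostream>

int main()
{
    std::size_t flop      = 2ull * 128 * 256 * 1024; // hypothetical FLOP count
    std::size_t num_btype = 64ull * 1024 * 1024;     // hypothetical bytes moved
    float avg_time_ms     = 0.35f;                   // hypothetical kernel time in ms

    float tflops     = static_cast<float>(flop) / 1.E9 / avg_time_ms;
    float gb_per_sec = num_btype / 1.E6 / avg_time_ms;

    std::cout << tflops << " TFlops, " << gb_per_sec << " GB/s\n";
    return 0;
}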
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_dl_common.hpp
ADDED
@@ -0,0 +1,196 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <cstdlib>
#include <iostream>
#include <numeric>
#include <type_traits>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp"

void print_helper_msg()
{
    std::cout << "arg1: verification (0=no, 1=yes)\n"
              << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
              << "arg3: time kernel (0=no, 1=yes)\n"
              << ck::utils::conv::get_conv_param_parser_helper_msg() << std::endl;
}

template <ck::index_t NDimSpatial,
          typename InDataType,
          typename WeiDataType,
          typename DsDataType,
          typename OutDataType,
          typename InElementOp,
          typename WeiElementOp,
          typename OutElementOp,
          typename DeviceConvNDFwdInstance>
bool run_grouped_conv_fwd_dl(bool do_verification,
                             int init_method,
                             bool time_kernel,
                             const ck::utils::conv::ConvParam& conv_param,
                             const HostTensorDescriptor& in_g_n_c_wis_desc,
                             const HostTensorDescriptor& wei_g_k_c_xs_desc,
                             const HostTensorDescriptor& out_g_n_k_wos_desc,
                             const InElementOp& in_element_op,
                             const WeiElementOp& wei_element_op,
                             const OutElementOp& out_element_op)
{
    using DDataType = ck::remove_cvref_t<ck::tuple_element_t<0, DsDataType>>;

    Tensor<InDataType> in(in_g_n_c_wis_desc);
    Tensor<WeiDataType> wei(wei_g_k_c_xs_desc);
    Tensor<DDataType> bias(out_g_n_k_wos_desc);
    Tensor<OutDataType> out_host(out_g_n_k_wos_desc);
    Tensor<OutDataType> out_device(out_g_n_k_wos_desc);

    std::cout << "in: " << in.mDesc << std::endl;
    std::cout << "wei: " << wei.mDesc << std::endl;
    std::cout << "out: " << out_host.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-2, 3});
        wei.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-2, 3});
        bias.GenerateTensorValue(GeneratorTensor_2<DDataType>{-2, 3});
        break;
    case 2:
        in.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
        wei.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
        bias.GenerateTensorValue(GeneratorTensor_3<DDataType>{-0.5, 0.5});
        break;
    default:
        in.GenerateTensorValue(GeneratorTensor_1<InDataType>{1});
        wei.GenerateTensorValue(GeneratorTensor_1<WeiDataType>{-1});
        bias.GenerateTensorValue(GeneratorTensor_1<DDataType>{1});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpaceSize());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * wei.mDesc.GetElementSpaceSize());
    DeviceMem bias_device_buf(sizeof(DDataType) * bias.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * out_device.mDesc.GetElementSpaceSize());

    in_device_buf.ToDevice(in.mData.data());
    wei_device_buf.ToDevice(wei.mData.data());
    bias_device_buf.ToDevice(bias.mData.data());

    std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_strides{};
    std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_strides{};
    std::array<ck::index_t, NDimSpatial + 3> d_g_n_k_wos_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> d_g_n_k_wos_strides{};
    std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_strides{};
    std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
    std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
    std::array<ck::index_t, NDimSpatial> input_left_pads{};
    std::array<ck::index_t, NDimSpatial> input_right_pads{};

    auto copy = [](auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

    copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
    copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
    copy(wei_g_k_c_xs_desc.GetLengths(), b_g_k_c_xs_lengths);
    copy(wei_g_k_c_xs_desc.GetStrides(), b_g_k_c_xs_strides);
    copy(out_g_n_k_wos_desc.GetLengths(), d_g_n_k_wos_lengths);
    copy(out_g_n_k_wos_desc.GetStrides(), d_g_n_k_wos_strides);
    copy(out_g_n_k_wos_desc.GetLengths(), e_g_n_k_wos_lengths);
    copy(out_g_n_k_wos_desc.GetStrides(), e_g_n_k_wos_strides);
    copy(conv_param.conv_filter_strides_, conv_filter_strides);
    copy(conv_param.conv_filter_dilations_, conv_filter_dilations);
    copy(conv_param.input_left_pads_, input_left_pads);
    copy(conv_param.input_right_pads_, input_right_pads);

    // do Conv
    auto conv     = DeviceConvNDFwdInstance{};
    auto invoker  = conv.MakeInvoker();
    auto argument = conv.MakeArgument(
        in_device_buf.GetDeviceBuffer(),
        wei_device_buf.GetDeviceBuffer(),
        std::array<const void*, 1>{bias_device_buf.GetDeviceBuffer()},
        out_device_buf.GetDeviceBuffer(),
        a_g_n_c_wis_lengths,
        a_g_n_c_wis_strides,
        b_g_k_c_xs_lengths,
        b_g_k_c_xs_strides,
        std::array<std::array<ck::index_t, NDimSpatial + 3>, 1>{{d_g_n_k_wos_lengths}},
        std::array<std::array<ck::index_t, NDimSpatial + 3>, 1>{{d_g_n_k_wos_strides}},
        e_g_n_k_wos_lengths,
        e_g_n_k_wos_strides,
        conv_filter_strides,
        conv_filter_dilations,
        input_left_pads,
        input_right_pads,
        in_element_op,
        wei_element_op,
        out_element_op);

    if(!conv.IsSupportedArgument(argument))
    {
        std::cout << "wrong! device_conv with the specified compilation parameters does not "
                     "support this Conv problem"
                  << std::endl;

        return true;
    }

    float avg_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop      = conv_param.GetFlops();
    std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();

    float tflops     = static_cast<float>(flop) / 1.E9 / avg_time;
    float gb_per_sec = num_btype / 1.E6 / avg_time;
    std::cout << "Perf: " << avg_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, "
              << conv.GetTypeString() << std::endl;

    if(do_verification)
    {
        auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<
            NDimSpatial,
            InDataType,
            WeiDataType,
            OutDataType,
            InElementOp,
            WeiElementOp,
            ck::tensor_operation::element_wise::PassThrough>();

        auto ref_invoker = ref_conv.MakeInvoker();
        auto ref_argument =
            ref_conv.MakeArgument(in,
                                  wei,
                                  out_host,
                                  conv_param.conv_filter_strides_,
                                  conv_param.conv_filter_dilations_,
                                  conv_param.input_left_pads_,
                                  conv_param.input_right_pads_,
                                  in_element_op,
                                  wei_element_op,
                                  ck::tensor_operation::element_wise::PassThrough{});

        ref_invoker.Run(ref_argument);

        // cde_elementwise
        out_host.ForEach(
            [&](auto&, auto idx) { out_element_op(out_host(idx), out_host(idx), bias(idx)); });

        out_device_buf.FromDevice(out_device.mData.data());

        return ck::utils::check_err(
            out_device.mData, out_host.mData, "Error: incorrect results!", 1e-5f, 1e-4f);
    }

    return true;
}
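The host-side cde_elementwise pass in run_grouped_conv_fwd_dl calls out_element_op(e, c, d), combining the reference convolution output c with the bias d into e. For an op like AddRelu (used by the int8 example below), the intended effect is presumably e = max(c + d, 0); the functor below is an illustrative stand-in with the same three-argument call shape, not CK's actual element_wise::AddRelu definition.

// Illustrative sketch of a CDE elementwise functor with the call shape used
// above: op(e, c, d) writes the combined result into e. Hypothetical type,
// not part of composable_kernels.
#include <algorithm>

struct AddReluSketch
{
    template <typename E, typename C, typename D>
    void operator()(E& e, const C& c, const D& d) const
    {
        const C sum = c + static_cast<C>(d);      // add bias
        e = static_cast<E>(std::max(sum, C{0}));  // then ReLU
    }
};
// usage: AddReluSketch{}(out(idx), conv_out(idx), bias(idx));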
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_dl_int8.cpp
ADDED
@@ -0,0 +1,40 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "convnd_fwd_dl_common.hpp"

#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp"

#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"

using InDataType  = int8_t;
using WeiDataType = int8_t;
using AccDataType = int32_t;
using DsDataType  = ck::Tuple<int8_t>;
using OutDataType = int8_t;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::AddRelu;

static constexpr auto ConvSpec =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;

static constexpr auto GemmPaddingSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

template <ck::index_t NDimSpatial, typename InLayout, typename WeiLayout, typename OutLayout>
// clang-format off
using DeviceGroupedConvNDFwdInstance = ck::tensor_operation::device::DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK
// ######| NDim| InData| WeiData| MultipleD| OutData| AccData| InLayout| WeiLayout| MultipleD| OutLayout| In| Wei| Out| Convolution| GEMM| Block| MPer| NPer| K0Per| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer|
// ######| Spatial| Type| Type| Type| Type| Type| | | Layout| | Elementwise| Elementwise| Elementwise| Forward| Specialization| Size| Block| Block| Block| | ThreadM111| ThreadN111| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector|
// ######| | | | | | | | | | | Operation| Operation| Operation| Specialization| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | |
// ######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
        < NDimSpatial, InDataType, WeiDataType, DsDataType, OutDataType, AccDataType, InLayout, WeiLayout, ck::Tuple<OutLayout>, OutLayout, InElementOp, WeiElementOp, OutElementOp, ConvSpec, GemmPaddingSpec, 256, 128, 128, 16, 4, 4, 4, 1, S<8, 2>, S<8, 2>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<0, 1, 2, 3, 4, 5>, 5, 4>;
// clang-format on

#include "run_convnd_fwd_dl_example.inc"

int main(int argc, char* argv[]) { return run_convnd_fwd_dl_example(argc, argv) ? 0 : 1; }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_xdl_fp64.cpp
ADDED
@@ -0,0 +1,79 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "convnd_fwd_common.hpp"

#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_abd_xdl_cshuffle.hpp"

#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"

using InDataType       = double;
using WeiDataType      = double;
using AccDataType      = double;
using CShuffleDataType = double;
using OutDataType      = double;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto ConvSpec =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

template <ck::index_t NDimSpatial, typename InLayout, typename WeiLayout, typename OutLayout>
using DeviceGroupedConvNDFwdInstance =
    ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<
        NDimSpatial,
        InLayout,
        WeiLayout,
        ck::Tuple<>,
        OutLayout,
        InDataType,
        WeiDataType,
        AccDataType,
        CShuffleDataType,
        ck::Tuple<>,
        OutDataType,
        InElementOp,
        WeiElementOp,
        OutElementOp,
        ConvSpec,        // ConvForwardSpecialization
        GemmSpec,        // GemmSpecialization
        1,               // NumGemmKPrefetchStage
        256,             // BlockSize
        128,             // MPerBlock
        128,             // NPerBlock
        8,               // KPerBlock
        2,               // AK1
        2,               // BK1
        16,              // MPerXdl
        16,              // NPerXdl
        4,               // MXdlPerWave
        4,               // NXdlPerWave
        S<4, 64, 1>,     // ABlockTransferThreadClusterLengths_AK0_M_AK1
        S<1, 0, 2>,      // ABlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,      // ABlockTransferSrcAccessOrder
        2,               // ABlockTransferSrcVectorDim
        2,               // ABlockTransferSrcScalarPerVector
        2,               // ABlockTransferDstScalarPerVector_AK1
        1,               // ABlockLdsExtraM
        S<4, 64, 1>,     // BBlockTransferThreadClusterLengths_BK0_N_BK1
        S<1, 0, 2>,      // BBlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,      // BBlockTransferSrcAccessOrder
        2,               // BBlockTransferSrcVectorDim
        2,               // BBlockTransferSrcScalarPerVector
        2,               // BBlockTransferDstScalarPerVector_BK1
        1,               // BBlockLdsExtraN
        1,               // CShuffleMXdlPerWavePerShuffle
        1,               // CShuffleNXdlPerWavePerShuffle
        S<1, 16, 1, 16>, // CDEBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock
        1>;              // CDEBlockTransferScalarPerVector_NPerBlock

#include "run_convnd_fwd_example.inc"

int main(int argc, char* argv[]) { return run_convnd_fwd_example(argc, argv) ? 0 : 1; }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/09_convnd_fwd/convnd_fwd_xdl_int8.cpp
ADDED
@@ -0,0 +1,79 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "convnd_fwd_common.hpp"

#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_abd_xdl_cshuffle.hpp"

#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"

using InDataType       = int8_t;
using WeiDataType      = int8_t;
using AccDataType      = int32_t;
using CShuffleDataType = int8_t;
using OutDataType      = int8_t;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto ConvSpec =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

template <ck::index_t NDimSpatial, typename InLayout, typename WeiLayout, typename OutLayout>
using DeviceGroupedConvNDFwdInstance =
    ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD_Xdl_CShuffle<
        NDimSpatial,
        InLayout,
        WeiLayout,
        ck::Tuple<>,
        OutLayout,
        InDataType,
        WeiDataType,
        AccDataType,
        CShuffleDataType,
        ck::Tuple<>,
        OutDataType,
        InElementOp,
        WeiElementOp,
        OutElementOp,
        ConvSpec,       // ConvForwardSpecialization
        GemmSpec,       // GemmSpecialization
        1,              // NumGemmKPrefetchStage
        256,            // BlockSize
        128,            // MPerBlock
        256,            // NPerBlock
        64,             // KPerBlock
        16,             // AK1
        16,             // BK1
        32,             // MPerXdl
        32,             // NPerXdl
        2,              // MXdlPerWave
        4,              // NXdlPerWave
        S<4, 64, 1>,    // ABlockTransferThreadClusterLengths_AK0_M_AK1
        S<1, 0, 2>,     // ABlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,     // ABlockTransferSrcAccessOrder
        2,              // ABlockTransferSrcVectorDim
        16,             // ABlockTransferSrcScalarPerVector
        16,             // ABlockTransferDstScalarPerVector_AK1
        1,              // ABlockLdsExtraM
        S<4, 64, 1>,    // BBlockTransferThreadClusterLengths_BK0_N_BK1
        S<1, 0, 2>,     // BBlockTransferThreadClusterArrangeOrder
        S<1, 0, 2>,     // BBlockTransferSrcAccessOrder
        2,              // BBlockTransferSrcVectorDim
        16,             // BBlockTransferSrcScalarPerVector
        16,             // BBlockTransferDstScalarPerVector_BK1
        1,              // BBlockLdsExtraN
        1,              // CShuffleMXdlPerWavePerShuffle
        1,              // CShuffleNXdlPerWavePerShuffle
        S<1, 64, 1, 4>, // CDEBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock
        16>;            // CDEBlockTransferScalarPerVector_NPerBlock

#include "run_convnd_fwd_example.inc"

int main(int argc, char* argv[]) { return run_convnd_fwd_example(argc, argv) ? 0 : 1; }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/15_grouped_gemm/grouped_gemm_multiple_d_splitk_xdl_fp16.cpp
ADDED
@@ -0,0 +1,394 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <memory>  // std::unique_ptr / std::make_unique used below
#include <sstream> // std::istringstream used by argToIntArray

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_splitk_xdl_cshuffle_two_stage.hpp"
#include "ck/tensor_operation/gpu/device/device_grouped_gemm.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include <ck/utility/data_type.hpp>
#include <ck/utility/tuple.hpp>

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm_multiple_d.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16 = ck::half_t;
using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using AddAdd      = ck::tensor_operation::element_wise::AddAdd;

using ADataType        = F16;
using BDataType        = F16;
using AccDataType      = F32;
using CShuffleDataType = F32;
using DDataType        = F16;
using DsDataType       = ck::Tuple<DDataType, DDataType>;
using EDataType        = F32;

using ALayout  = Row;
using BLayout  = Col;
using DLayout  = Row;
using DsLayout = ck::Tuple<DLayout, DLayout>;
using ELayout  = Row;

using AElementOp   = PassThrough;
using BElementOp   = PassThrough;
using CDEElementOp = AddAdd;

static constexpr auto GemmMNKPadding = ck::tensor_operation::device::GemmSpecialization::MNKPadding;
static constexpr int NumDMatrices    = 2;

using DeviceGemmInstance =
    ck::tensor_operation::device::DeviceGroupedGemmMultipleDSplitKXdlCShuffleTwoStage
// clang-format off
//######| ALayout| BLayout| DsLayout| ELayout| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
//######| | | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
//######| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
//######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
         < ALayout, BLayout, DsLayout, ELayout, ADataType, BDataType, AccDataType, CShuffleDataType, DsDataType, EDataType, AElementOp, BElementOp, CDEElementOp, GemmMNKPadding, 1, 256, 64, 128, 32, 8, 8, 32, 32, 1, 2, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, 1, S<1, 4, 64, 1>, S<0, 2, 1, 3>, S<0, 2, 1, 3>, 3, 8, 8, 1, 1, 1, S<1, 32, 1, 8>, 4>;
// clang-format on

struct ProblemSize final
{
    std::vector<ck::index_t> Ms;
    std::vector<ck::index_t> Ns;
    std::vector<ck::index_t> Ks;

    std::vector<ck::index_t> stride_As;
    std::vector<ck::index_t> stride_Bs;
    std::vector<std::vector<ck::index_t>> stride_Ds;
    std::vector<ck::index_t> stride_Cs;

    ck::index_t group_count;
};

struct ExecutionConfig final
{
    bool do_verification = true;
    int init_method      = 1;
    int k_batch          = 128;
    bool time_kernel     = true;
};

bool run_grouped_gemm(const ProblemSize& problem_size, const ExecutionConfig& config)
{
    auto group_count = problem_size.group_count;

    // GEMM shape
    std::vector<ck::tensor_operation::device::GemmDesc> gemm_descs;
    std::vector<void*> p_Cs;
    std::vector<const void*> p_As;
    std::vector<const void*> p_Bs;
    std::vector<std::array<const void*, NumDMatrices>> p_Ds = {};

    gemm_descs.reserve(group_count);
    p_As.reserve(group_count);
    p_Bs.reserve(group_count);
    p_Ds.reserve(group_count);

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            using namespace ck::literals;

            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    std::vector<Tensor<ADataType>> a_tensors;
    std::vector<Tensor<BDataType>> b_tensors;
    std::vector<std::array<Tensor<DDataType>, NumDMatrices>> d_tensors;
    std::vector<Tensor<EDataType>> c_host_tensors;
    std::vector<Tensor<EDataType>> c_device_result_tensors;

    a_tensors.reserve(group_count);
    b_tensors.reserve(group_count);
    d_tensors.reserve(group_count);
    c_host_tensors.reserve(group_count);
    c_device_result_tensors.reserve(group_count);

    using DeviceMemPtr = std::unique_ptr<DeviceMem>;

    std::vector<DeviceMemPtr> a_tensors_device, b_tensors_device, c_tensors_device;
    std::vector<std::vector<DeviceMemPtr>> d_tensors_device;

    a_tensors_device.reserve(group_count);
    b_tensors_device.reserve(group_count);
    d_tensors_device.reserve(group_count);
    c_tensors_device.reserve(group_count);

    std::size_t flop = 0, num_btype = 0;

    for(int i = 0; i < group_count; i++)
    {
        a_tensors.push_back(Tensor<ADataType>(f_host_tensor_descriptor(
            problem_size.Ms[i], problem_size.Ks[i], problem_size.stride_As[i], ALayout{})));
        b_tensors.push_back(Tensor<BDataType>(f_host_tensor_descriptor(
            problem_size.Ks[i], problem_size.Ns[i], problem_size.stride_Bs[i], BLayout{})));

        auto d0_tensor = Tensor<DDataType>(f_host_tensor_descriptor(
            problem_size.Ms[i], problem_size.Ns[i], problem_size.stride_Cs[i], DLayout{}));
        auto d1_tensor = Tensor<DDataType>(f_host_tensor_descriptor(
            problem_size.Ms[i], problem_size.Ns[i], problem_size.stride_Cs[i], DLayout{}));

        std::array<Tensor<DDataType>, NumDMatrices> d_tens = {d0_tensor, d1_tensor};
        d_tensors.push_back(d_tens);
        c_host_tensors.push_back(Tensor<EDataType>(f_host_tensor_descriptor(
            problem_size.Ms[i], problem_size.Ns[i], problem_size.stride_Cs[i], ELayout{})));
        c_device_result_tensors.push_back(Tensor<EDataType>(f_host_tensor_descriptor(
            problem_size.Ms[i], problem_size.Ns[i], problem_size.stride_Cs[i], ELayout{})));
        std::cout << "gemm[" << i << "] a_m_k: " << a_tensors[i].mDesc
                  << " b_k_n: " << b_tensors[i].mDesc
                  << " c_m_n: " << c_device_result_tensors[i].mDesc << std::endl;

        flop += std::size_t(2) * problem_size.Ms[i] * problem_size.Ks[i] * problem_size.Ns[i];
        num_btype += sizeof(ADataType) * a_tensors[i].GetElementSize() +
                     sizeof(BDataType) * b_tensors[i].GetElementSize() +
                     sizeof(DDataType) * d_tensors[i][0].GetElementSize() * NumDMatrices +
                     sizeof(EDataType) * c_device_result_tensors[i].GetElementSize();

        switch(config.init_method)
        {
        case 0: break;
        case 1:
            a_tensors[i].GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
            b_tensors[i].GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
            for(int j = 0; j < NumDMatrices; ++j)
            {
                d_tensors[i][j].GenerateTensorValue(GeneratorTensor_2<DDataType>{-5, 5});
            }
            break;
        case 2:
            a_tensors[i].GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
            b_tensors[i].GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
            for(int j = 0; j < NumDMatrices; ++j)
            {
                d_tensors[i][j].GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
            }
            break;
        default:
            a_tensors[i].GenerateTensorValue(GeneratorTensor_Sequential<0>{});
            b_tensors[i].GenerateTensorValue(GeneratorTensor_Sequential<1>{});
            for(int j = 0; j < NumDMatrices; ++j)
            {
                d_tensors[i][j].GenerateTensorValue(GeneratorTensor_Sequential<0>{});
            }
        }
    }

    for(int i = 0; i < group_count; i++)
    {
        a_tensors_device.emplace_back(
            std::make_unique<DeviceMem>(a_tensors[i].GetElementSpaceSize() * sizeof(ADataType)));

        b_tensors_device.emplace_back(
            std::make_unique<DeviceMem>(b_tensors[i].GetElementSpaceSize() * sizeof(BDataType)));

        c_tensors_device.emplace_back(std::make_unique<DeviceMem>(
            c_device_result_tensors[i].GetElementSpaceSize() * sizeof(EDataType)));

        // reserve() above does not create elements, so append this group's
        // inner vector before indexing into d_tensors_device[i]
        d_tensors_device.emplace_back();
        for(int j = 0; j < NumDMatrices; ++j)
        {
            d_tensors_device[i].emplace_back(std::make_unique<DeviceMem>(
                d_tensors[i][j].GetElementSpaceSize() * sizeof(DDataType)));
        }

        a_tensors_device[i]->ToDevice(a_tensors[i].mData.data());
        b_tensors_device[i]->ToDevice(b_tensors[i].mData.data());
        for(int j = 0; j < NumDMatrices; ++j)
        {
            d_tensors_device[i][j]->ToDevice(d_tensors[i][j].mData.data());
        }
        c_tensors_device[i]->SetZero();

        p_As.push_back(a_tensors_device[i]->GetDeviceBuffer());
        p_Bs.push_back(b_tensors_device[i]->GetDeviceBuffer());
        p_Ds.push_back(
            {d_tensors_device[i][0]->GetDeviceBuffer(), d_tensors_device[i][1]->GetDeviceBuffer()});
        p_Cs.push_back(c_tensors_device[i]->GetDeviceBuffer());
        gemm_descs.push_back({problem_size.Ms[i],
                              problem_size.Ns[i],
                              problem_size.Ks[i],
                              problem_size.stride_As[i],
                              problem_size.stride_Bs[i],
                              problem_size.stride_Cs[i],
                              problem_size.stride_Ds[i]});
    }

    auto a_element_op   = AElementOp{};
    auto b_element_op   = BElementOp{};
    auto cde_element_op = CDEElementOp{};

    auto gemm    = DeviceGemmInstance{};
    auto invoker = gemm.MakeInvoker();

    // do GEMM
    auto argument = gemm.MakeArgument(
        p_As, p_Bs, p_Ds, p_Cs, gemm_descs, a_element_op, b_element_op, cde_element_op);

    gemm.SetKBatchSize(argument, config.k_batch);

    if(!gemm.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_gemm with the specified compilation parameters does "
            "not support this GEMM problem");
    }

    DeviceMem gemm_workspace_dev(gemm.GetWorkSpaceSize(&argument));
    gemm.SetWorkSpacePointer(&argument, gemm_workspace_dev.GetDeviceBuffer());

    DeviceMem gemm_arg_dev_mem(gemm.GetDeviceKernelArgSize(&argument));
    gemm.SetDeviceKernelArgs(argument, gemm_arg_dev_mem.GetDeviceBuffer());

    invoker.Run(argument, StreamConfig{nullptr, false, 1});

    if(config.time_kernel)
    {
        float ave_time   = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
        float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
        float gb_per_sec = num_btype / 1.E6 / ave_time;

        std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
                  << " GB/s, " << gemm.GetTypeString() << std::endl;
    }

    bool pass = true;
    if(config.do_verification)
    {
        using ReferenceGemmInstance =
            ck::tensor_operation::host::ReferenceGemmMultipleD<ADataType,
                                                               BDataType,
                                                               DsDataType,
                                                               EDataType,
                                                               AccDataType,
                                                               AElementOp,
                                                               BElementOp,
                                                               CDEElementOp>;

        for(std::size_t i = 0; i < gemm_descs.size(); i++)
        {
            auto karg = argument.gemm_kernel_args_[i].karg_;
            auto dev_res_tensor =
                Tensor<float>(f_host_tensor_descriptor(karg.M, karg.N, karg.StrideC, ELayout{}));

            c_tensors_device[i]->FromDevice(c_device_result_tensors[i].mData.data(),
                                            c_device_result_tensors[i].mDesc.GetElementSize() *
                                                sizeof(EDataType));
            auto ref_gemm    = ReferenceGemmInstance{};
            auto ref_invoker = ref_gemm.MakeInvoker();

            auto ref_argument = ref_gemm.MakeArgument(a_tensors[i],
                                                      b_tensors[i],
                                                      d_tensors[i],
                                                      c_host_tensors[i],
                                                      a_element_op,
                                                      b_element_op,
                                                      cde_element_op);

            ref_invoker.Run(ref_argument);
            pass &= ck::utils::check_err(c_device_result_tensors[i], c_host_tensors[i]);
        }

        std::cout << "Verification: " << (pass ? "SUCCESS" : "FAILURE") << "!" << std::endl;
    }

    return pass;
}

std::vector<int> argToIntArray(char* input)
{
    std::vector<int> out;

    std::istringstream in(input);

    std::string item;

    while(std::getline(in, item, ','))
    {
        out.push_back(std::stoi(item));
    }

    return out;
}

int main(int argc, char* argv[])
{
    ProblemSize problem_size;
    ExecutionConfig config;

    if(argc < 11)
    {
        std::vector<ck::index_t> Ms{64, 127, 255, 129, 260, 190, 77};
        problem_size.group_count = Ms.size();

        for(int i = 0; i < problem_size.group_count; i++)
        {
            problem_size.Ms.push_back(Ms[i]);
            problem_size.Ns.push_back(252);
            problem_size.Ks.push_back(4608);

            problem_size.stride_As.push_back(problem_size.Ks[i]);
            problem_size.stride_Bs.push_back(problem_size.Ks[i]);
            problem_size.stride_Cs.push_back(problem_size.Ns[i]);

            problem_size.stride_Ds.push_back({});
            for(int j = 0; j < NumDMatrices; ++j)
            {
                problem_size.stride_Ds[i].push_back(problem_size.Ns[i]);
            }
        }

        std::cout
            << "Usage:\n"
            << "arg1: verification (0=no, 1=yes)\n"
            << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
            << "arg3: time kernel (0=no, 1=yes)\n"
            << "arg4 to 9: Ms, Ns, Ks, StrideAs, StrideBs, StrideCs (e.g., 256,256 128,128 64,64 "
               "64,64 64,64 128,128)\n"
            << "arg10: k_batch (> 0)\n"
            << "... setting default values." << std::endl;
    }
    else
    {
        config.do_verification = std::stoi(argv[1]);
        config.init_method     = std::stoi(argv[2]);
        config.time_kernel     = std::stoi(argv[3]);
        config.k_batch         = std::stoi(argv[10]);

        problem_size.Ms = argToIntArray(argv[4]);
        problem_size.Ns = argToIntArray(argv[5]);
        problem_size.Ks = argToIntArray(argv[6]);

        problem_size.stride_As = argToIntArray(argv[7]);
        problem_size.stride_Bs = argToIntArray(argv[8]);
        problem_size.stride_Cs = argToIntArray(argv[9]);

        for(int j = 0; j < NumDMatrices; ++j)
        {
            problem_size.stride_Ds.push_back(problem_size.stride_Cs);
        }

        problem_size.group_count = problem_size.Ms.size();
    }

    return !run_grouped_gemm(problem_size, config);
}
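argToIntArray above tokenizes one argv entry on commas via std::getline, which is how arg4..arg9 encode one value per GEMM group. A self-contained usage sketch of the same logic:

// Usage sketch for the comma-separated argument parsing above.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::vector<int> argToIntArray(char* input) // same logic as in the example
{
    std::vector<int> out;
    std::istringstream in(input);
    std::string item;
    while(std::getline(in, item, ','))
        out.push_back(std::stoi(item));
    return out;
}

int main()
{
    char ms[] = "256,512,1024";
    for(int m : argToIntArray(ms))
        std::cout << m << '\n'; // prints 256, 512, 1024 on separate lines
    return 0;
}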
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/17_convnd_bwd_data/convnd_bwd_data_common.hpp
ADDED
@@ -0,0 +1,153 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_conv_bwd_data.hpp"

void print_helper_msg()
{
    std::cout << "arg1: verification (0=no, 1=yes)\n"
              << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
              << "arg3: time kernel (0=no, 1=yes)\n"
              << ck::utils::conv::get_conv_param_parser_helper_msg() << std::endl;
}

template <ck::index_t NDimSpatial,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType,
          typename InElementOp,
          typename WeiElementOp,
          typename OutElementOp,
          typename DeviceConvNdBwdDataInstance>
int run_conv_bwd_data(bool do_verification,
                      int init_method,
                      bool time_kernel,
                      const ck::utils::conv::ConvParam& conv_param,
                      const HostTensorDescriptor& in_g_n_c_wis_desc,
                      const HostTensorDescriptor& wei_g_k_c_xs_desc,
                      const HostTensorDescriptor& out_g_n_k_wos_desc,
                      const InElementOp& in_element_op,
                      const WeiElementOp& wei_element_op,
                      const OutElementOp& out_element_op)
{
    Tensor<InDataType> in_host(in_g_n_c_wis_desc);
    Tensor<InDataType> in_device(in_g_n_c_wis_desc);
    Tensor<WeiDataType> wei(wei_g_k_c_xs_desc);
    Tensor<OutDataType> out(out_g_n_k_wos_desc);

    std::cout << "in: " << in_host.mDesc << std::endl;
    std::cout << "wei: " << wei.mDesc << std::endl;
    std::cout << "out: " << out.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        out.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-5, 5});
        wei.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
        break;
    case 2:
        out.GenerateTensorValue(GeneratorTensor_3<OutDataType>{0.0, 1.0});
        wei.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
        break;
    default:
        out.GenerateTensorValue(GeneratorTensor_1<OutDataType>{1});
        wei.GenerateTensorValue(GeneratorTensor_1<WeiDataType>{1});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * in_device.mDesc.GetElementSpaceSize());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * wei.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * out.mDesc.GetElementSpaceSize());

    out_device_buf.ToDevice(out.mData.data());
    wei_device_buf.ToDevice(wei.mData.data());

    // reset input to zero
    in_device_buf.SetZero();

    // do Conv bwd-data
    auto conv    = DeviceConvNdBwdDataInstance{};
    auto invoker = conv.MakeInvoker();
    auto argument =
        conv.MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                                 static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
                                 static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
                                 conv_param.N_,
                                 conv_param.K_,
                                 conv_param.C_,
                                 conv_param.input_spatial_lengths_,
                                 conv_param.filter_spatial_lengths_,
                                 conv_param.GetOutputSpatialLengths(),
                                 conv_param.conv_filter_strides_,
                                 conv_param.conv_filter_dilations_,
                                 conv_param.input_left_pads_,
                                 conv_param.input_right_pads_,
                                 in_element_op,
                                 wei_element_op,
                                 out_element_op);

    if(!conv.IsSupportedArgument(argument.get()))
    {
        std::cout << "Not supported, please check parameters or device" << std::endl;
        return 0;
    }

    float ave_time = invoker.Run(argument.get(), StreamConfig{nullptr, time_kernel});

    std::size_t flop      = conv_param.GetFlops();
    std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();

    float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
              << std::endl;

    if(do_verification)
    {
        auto ref_conv = ck::tensor_operation::host::ReferenceConvBwdData<NDimSpatial,
                                                                         InDataType,
                                                                         WeiDataType,
                                                                         OutDataType,
                                                                         InElementOp,
                                                                         WeiElementOp,
                                                                         OutElementOp>();

        auto ref_invoker = ref_conv.MakeInvoker();

        auto ref_argument = ref_conv.MakeArgument(in_host,
                                                  wei,
                                                  out,
                                                  conv_param.conv_filter_strides_,
                                                  conv_param.conv_filter_dilations_,
                                                  conv_param.input_left_pads_,
                                                  conv_param.input_right_pads_,
                                                  in_element_op,
                                                  wei_element_op,
                                                  out_element_op);

        ref_invoker.Run(ref_argument);

        in_device_buf.FromDevice(in_device.mData.data());

        return ck::utils::check_err(in_device, in_host) ? 0 : 1;
    }

    return 0;
}
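One detail worth calling out in run_conv_bwd_data: in_device_buf.SetZero() is required because backward-data scatters and accumulates output-gradient contributions into the input gradient, so input positions not hit by any filter tap must already hold zero. A self-contained 1D sketch of that reference computation (illustrative only, not CK's ReferenceConvBwdData):

// Minimal 1D conv bwd-data sketch: scatter each output-gradient element back
// through the filter taps into the input gradient. din starts at zero and is
// accumulated into, mirroring the SetZero() above.
#include <iostream>
#include <vector>

int main()
{
    const int Wi = 8, X = 3, stride = 1, pad = 1;
    const int Wo = (Wi + 2 * pad - X) / stride + 1;

    std::vector<float> dout(Wo, 1.0f), wei(X, 0.5f), din(Wi, 0.0f);

    for(int wo = 0; wo < Wo; ++wo)
        for(int x = 0; x < X; ++x)
        {
            const int wi = wo * stride + x - pad; // input position for this tap
            if(wi >= 0 && wi < Wi)
                din[wi] += dout[wo] * wei[x]; // accumulate, hence the zero init
        }

    for(float v : din)
        std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}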
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/22_cgemm/cgemm_xdl_common.hpp
ADDED
@@ -0,0 +1,254 @@
1 |
+
// SPDX-License-Identifier: MIT
|
2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
3 |
+
|
4 |
+
#include <numeric>
|
5 |
+
#include <initializer_list>
|
6 |
+
#include <cstdlib>
|
7 |
+
|
8 |
+
#include "ck/ck.hpp"
|
9 |
+
#include "ck/stream_config.hpp"
|
10 |
+
#include "ck/library/utility/check_err.hpp"
|
11 |
+
#include "ck/library/utility/device_memory.hpp"
|
12 |
+
#include "ck/library/utility/host_tensor.hpp"
|
13 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
14 |
+
#include "ck/library/utility/literals.hpp"
|
15 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
16 |
+
|
17 |
+
template <ck::index_t... Is>
|
18 |
+
using S = ck::Sequence<Is...>;
|
19 |
+
|
20 |
+
using F16 = ck::half_t;
|
21 |
+
using F32 = float;
|
22 |
+
using BF16 = ck::bhalf_t;
|
23 |
+
using INT8 = std::int8_t;
|
24 |
+
using INT32 = std::int32_t;
|
25 |
+
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
26 |
+
using INT4 = ck::int4_t;
|
27 |
+
#endif
|
28 |
+
|
29 |
+
template <typename ADataType,
|
30 |
+
typename BDataType,
|
31 |
+
typename CDataType,
|
32 |
+
typename ALayout,
|
33 |
+
typename BLayout,
|
34 |
+
typename CLayout,
|
35 |
+
typename AElementwiseOperation,
|
36 |
+
typename BElementwiseOperation,
|
37 |
+
typename CElementwiseOperation,
|
38 |
+
typename DeviceCGemmInstance,
|
39 |
+
typename ReferenceCGemmInstance,
|
40 |
+
typename KernelADataType = ADataType,
|
41 |
+
typename KernelBDataType = BDataType,
|
42 |
+
typename KernelCDataType = CDataType>
|
43 |
+
bool run_cgemm_xdl(ck::index_t M,
|
44 |
+
ck::index_t N,
|
45 |
+
ck::index_t K,
|
46 |
+
ck::index_t StrideA,
|
47 |
+
ck::index_t StrideB,
|
48 |
+
ck::index_t StrideC,
|
49 |
+
bool do_verification,
|
50 |
+
int init_method,
|
51 |
+
bool time_kernel)
|
52 |
+
{
|
53 |
+
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
|
54 |
+
static_assert(sizeof(ck::int4_t) == sizeof(int8_t),
|
55 |
+
"sizeof ck::int4_t and int8_t is different!");
|
56 |
+
static_assert(sizeof(ADataType) == sizeof(KernelADataType),
|
57 |
+
"sizeof ADataType and KernelADataType is different!");
|
58 |
+
static_assert(sizeof(BDataType) == sizeof(KernelBDataType),
|
59 |
+
"sizeof BDataType and KernelBDataType is different!");
|
60 |
+
static_assert(sizeof(CDataType) == sizeof(KernelCDataType),
|
61 |
+
"sizeof CDataType and KernelCDataType is different!");
|
62 |
+
#endif
|
63 |
+
|
64 |
+
auto f_host_tensor_descriptor =
|
65 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
66 |
+
using namespace ck::literals;
|
67 |
+
|
68 |
+
if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
|
69 |
+
{
|
70 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
71 |
+
}
|
72 |
+
else
|
73 |
+
{
|
74 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
75 |
+
}
|
76 |
+
};
|
77 |
+
|
78 |
+
Tensor<ADataType> a_m_k_real(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
79 |
+
Tensor<ADataType> a_m_k_imag(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
80 |
+
Tensor<BDataType> b_k_n_real(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
81 |
+
Tensor<BDataType> b_k_n_imag(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
82 |
+
Tensor<KernelCDataType> c_m_n_real_device_result(
|
83 |
+
f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
84 |
+
Tensor<KernelCDataType> c_m_n_imag_device_result(
|
85 |
+
f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
86 |
+
|
87 |
+
std::cout << "a_m_k_real: " << a_m_k_real.mDesc << std::endl;
|
88 |
+
std::cout << "a_m_k_imag: " << a_m_k_imag.mDesc << std::endl;
|
89 |
+
std::cout << "b_k_n_real: " << b_k_n_real.mDesc << std::endl;
|
90 |
+
std::cout << "b_k_n_imag: " << b_k_n_imag.mDesc << std::endl;
|
91 |
+
std::cout << "c_m_n_real: " << c_m_n_real_device_result.mDesc << std::endl;
|
92 |
+
std::cout << "c_m_n_imag: " << c_m_n_imag_device_result.mDesc << std::endl;
|
93 |
+
|
94 |
+
switch(init_method)
|
95 |
+
{
|
96 |
+
case 0: break;
|
97 |
+
case 1:
|
98 |
+
a_m_k_real.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
|
99 |
+
a_m_k_imag.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
|
100 |
+
b_k_n_real.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
|
101 |
+
b_k_n_imag.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
|
102 |
+
break;
|
103 |
+
default:
|
104 |
+
a_m_k_real.GenerateTensorValue(GeneratorTensor_3<ADataType>{-0.5, 0.5});
|
105 |
+
        a_m_k_imag.GenerateTensorValue(GeneratorTensor_3<ADataType>{-0.5, 0.5});
        b_k_n_real.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        b_k_n_imag.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
    }

    auto cgemm = DeviceCGemmInstance{};

    DeviceMem a_m_k_real_device_buf(sizeof(KernelADataType) *
                                    a_m_k_real.mDesc.GetElementSpaceSize());
    DeviceMem a_m_k_imag_device_buf(sizeof(KernelADataType) *
                                    a_m_k_imag.mDesc.GetElementSpaceSize());
    DeviceMem b_k_n_real_device_buf(sizeof(KernelBDataType) *
                                    b_k_n_real.mDesc.GetElementSpaceSize());
    DeviceMem b_k_n_imag_device_buf(sizeof(KernelBDataType) *
                                    b_k_n_imag.mDesc.GetElementSpaceSize());
    DeviceMem c_m_n_real_device_buf(sizeof(KernelCDataType) *
                                    c_m_n_real_device_result.mDesc.GetElementSpaceSize());
    DeviceMem c_m_n_imag_device_buf(sizeof(KernelCDataType) *
                                    c_m_n_imag_device_result.mDesc.GetElementSpaceSize());
    DeviceMem workspace_device_buf(cgemm.GetWorkspaceSize(M, N, K, StrideA, StrideB, StrideC));

#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
    if constexpr(std::is_same_v<ADataType, ck::int4_t>)
    {
        Tensor<KernelADataType> a_m_k_real_converted(a_m_k_real);
        Tensor<KernelADataType> a_m_k_imag_converted(a_m_k_imag);
        Tensor<KernelBDataType> b_k_n_real_converted(b_k_n_real);
        Tensor<KernelBDataType> b_k_n_imag_converted(b_k_n_imag);

        a_m_k_real_device_buf.ToDevice(a_m_k_real_converted.mData.data());
        a_m_k_imag_device_buf.ToDevice(a_m_k_imag_converted.mData.data());
        b_k_n_real_device_buf.ToDevice(b_k_n_real_converted.mData.data());
        b_k_n_imag_device_buf.ToDevice(b_k_n_imag_converted.mData.data());
    }
    else
#endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
    {
        a_m_k_real_device_buf.ToDevice(a_m_k_real.mData.data());
        a_m_k_imag_device_buf.ToDevice(a_m_k_imag.mData.data());
        b_k_n_real_device_buf.ToDevice(b_k_n_real.mData.data());
        b_k_n_imag_device_buf.ToDevice(b_k_n_imag.mData.data());
    }

    auto a_element_op = AElementwiseOperation{};
    auto b_element_op = BElementwiseOperation{};
    auto c_element_op = CElementwiseOperation{};

    // do GEMM
    auto invoker = cgemm.MakeInvoker();
    auto argument =
        cgemm.MakeArgument(static_cast<KernelADataType*>(a_m_k_real_device_buf.GetDeviceBuffer()),
                           static_cast<KernelADataType*>(a_m_k_imag_device_buf.GetDeviceBuffer()),
                           static_cast<KernelBDataType*>(b_k_n_real_device_buf.GetDeviceBuffer()),
                           static_cast<KernelBDataType*>(b_k_n_imag_device_buf.GetDeviceBuffer()),
                           static_cast<KernelCDataType*>(c_m_n_real_device_buf.GetDeviceBuffer()),
                           static_cast<KernelCDataType*>(c_m_n_imag_device_buf.GetDeviceBuffer()),
                           static_cast<KernelCDataType*>(workspace_device_buf.GetDeviceBuffer()),
                           M,
                           N,
                           K,
                           StrideA,
                           StrideB,
                           StrideC,
                           a_element_op,
                           b_element_op,
                           c_element_op);

    if(!cgemm.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_cgemm with the specified compilation parameters does "
            "not support this CGEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop = std::size_t(8) * M * N * K;
    std::size_t num_btype =
        std::size_t(2) *
        (sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(CDataType) * M * N);

    float tflops     = static_cast<float>(flop) / 1.E9 / ave_time;
    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s, "
              << cgemm.GetTypeString() << std::endl;

    if(do_verification)
    {
        Tensor<CDataType> c_m_n_real_host_result(
            f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
        Tensor<CDataType> c_m_n_imag_host_result(
            f_host_tensor_descriptor(M, N, StrideC, CLayout{}));

        auto ref_cgemm    = ReferenceCGemmInstance{};
        auto ref_invoker  = ref_cgemm.MakeInvoker();
        auto ref_argument = ref_cgemm.MakeArgument(a_m_k_real,
                                                   a_m_k_imag,
                                                   b_k_n_real,
                                                   b_k_n_imag,
                                                   c_m_n_real_host_result,
                                                   c_m_n_imag_host_result,
                                                   a_element_op,
                                                   b_element_op,
                                                   c_element_op);

        ref_invoker.Run(ref_argument);

        c_m_n_real_device_buf.FromDevice(c_m_n_real_device_result.mData.data());
        c_m_n_imag_device_buf.FromDevice(c_m_n_imag_device_result.mData.data());

        bool result = true;
#ifdef CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
        if constexpr(std::is_same_v<ADataType, ck::int4_t>)
        {
            const Tensor<CDataType> c_m_n_real_device_result_converted(c_m_n_real_device_result);
            const Tensor<CDataType> c_m_n_imag_device_result_converted(c_m_n_imag_device_result);

            result = ck::utils::check_err(c_m_n_real_device_result_converted,
                                          c_m_n_real_host_result,
                                          "Verification error: incorrect results in real part!",
                                          1e-2f,
                                          1e-1f);
            result = result && ck::utils::check_err(
                                   c_m_n_imag_device_result_converted,
                                   c_m_n_imag_host_result,
                                   "Verification error: incorrect results in imaginary part!",
                                   1e-2f,
                                   1e-1f);
        }
        else
#endif // CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4
        {
            result = ck::utils::check_err(c_m_n_real_device_result,
                                          c_m_n_real_host_result,
                                          "Verification error: incorrect results in real part!",
                                          1e-2f,
                                          1e-1f);
            result = result && ck::utils::check_err(
                                   c_m_n_imag_device_result,
                                   c_m_n_imag_host_result,
                                   "Verification error: incorrect results in imaginary part!",
                                   1e-2f,
                                   1e-1f);
        }

        return result;
    }
    return true;
}
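A note on the performance math above: the example counts flop = 8 * M * N * K because one complex multiply-accumulate expands to four real multiplies and four real adds (two adds for the complex product itself, two for the accumulation). A minimal standalone check of the same arithmetic follows; the helper name is illustrative and not part of the CK API:

```cpp
#include <cstddef>
#include <iostream>

// Illustrative only: mirrors the TFlops arithmetic used in run_cgemm_xdl above.
// flop / 1e12 [TFlop] divided by (ms / 1e3) [s] simplifies to flop / 1e9 / ms.
double cgemm_tflops(std::size_t M, std::size_t N, std::size_t K, double ms)
{
    const double flop = 8.0 * static_cast<double>(M) * N * K; // 4 mul + 4 add per complex MAC
    return flop / 1.0e9 / ms;
}

int main()
{
    // e.g. the default fp32 problem (M=3840, N=4096, K=4096) at a hypothetical 1 ms
    std::cout << cgemm_tflops(3840, 4096, 4096, 1.0) << " TFlops\n";
    return 0;
}
```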
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/22_cgemm/cgemm_xdl_fp32.cpp
ADDED
@@ -0,0 +1,132 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>

#include "cgemm_xdl_common.hpp"

#include "ck/library/reference_tensor_operation/cpu/reference_cgemm.hpp"

#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_cgemm_4gemm_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"

using ADataType   = F32;
using BDataType   = F32;
using CDataType   = F32;
using AccDataType = F32;

using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

using ReferenceCGemmInstance = ck::tensor_operation::host::
    ReferenceCGemm<ADataType, BDataType, CDataType, PassThrough, PassThrough, PassThrough>;

// clang-format off
using DeviceCGemmInstance = ck::tensor_operation::device::DeviceCGemm_4Gemm_Xdl_CShuffle
    <ALayout,         // typename ALayout
     BLayout,         // typename BLayout
     CLayout,         // typename CLayout
     ADataType,       // typename ADataType
     BDataType,       // typename BDataType
     CDataType,       // typename CDataType
     AccDataType,     // typename GemmAccDataType
     CDataType,       // typename CShuffleDataType
     PassThrough,     // typename AElementwiseOperation
     PassThrough,     // typename BElementwiseOperation
     PassThrough,     // typename CElementwiseOperation
     GemmDefault,     // GemmSpecialization GemmSpec
     1,               // index_t NumGemmKPrefetchStage
     256,             // index_t BlockSize
     256,             // index_t MPerBlock
     128,             // index_t NPerBlock
     16,              // index_t KPerBlock
     4,               // index_t AK1
     4,               // index_t BK1
     32,              // index_t MPerXDL
     32,              // index_t NPerXDL
     4,               // index_t MXdlPerWave
     2,               // index_t NXdlPerWave
     S<4, 64, 1>,     // typename ABlockTransferThreadClusterLengths_AK0_M_AK1
     S<1, 0, 2>,      // typename ABlockTransferThreadClusterArrangeOrder
     S<1, 0, 2>,      // typename ABlockTransferSrcAccessOrder
     2,               // index_t ABlockTransferSrcVectorDim
     4,               // index_t ABlockTransferSrcScalarPerVector
     4,               // index_t ABlockTransferDstScalarPerVector_AK1
     1,               // index_t ABlockLdsExtraM
     S<4, 64, 1>,     // typename BBlockTransferThreadClusterLengths_BK0_N_BK1
     S<1, 0, 2>,      // typename BBlockTransferThreadClusterArrangeOrder
     S<1, 0, 2>,      // typename BBlockTransferSrcAccessOrder
     2,               // index_t BBlockTransferSrcVectorDim
     4,               // index_t BBlockTransferSrcScalarPerVector
     4,               // index_t BBlockTransferDstScalarPerVector_BK1
     1,               // index_t BBlockLdsExtraN
     1,               // index_t CShuffleMXdlPerWavePerShuffle
     1,               // index_t CShuffleNXdlPerWavePerShuffle
     S<1, 16, 1, 16>, // typename CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock
     4>;              // index_t CShuffleBlockTransferScalarPerVector_NPerBlock
// clang-format on

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // CGEMM shape
    ck::index_t M = 3840;
    ck::index_t N = 4096;
    ck::index_t K = 4096;

    ck::index_t StrideA = 4096;
    ck::index_t StrideB = 4096;
    ck::index_t StrideC = 4096;

    if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 10)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideC = std::stoi(argv[9]);
    }
    else
    {
        std::cout << "arg1: verification (0=no, 1=yes)\n"
                  << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
                  << "arg3: time kernel (0=no, 1=yes)\n"
                  << "arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC\n"
                  << std::endl;
        exit(0);
    }

    return !run_cgemm_xdl<ADataType,
                          BDataType,
                          CDataType,
                          ALayout,
                          BLayout,
                          CLayout,
                          PassThrough,
                          PassThrough,
                          PassThrough,
                          DeviceCGemmInstance,
                          ReferenceCGemmInstance>(
        M, N, K, StrideA, StrideB, StrideC, do_verification, init_method, time_kernel);
}
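The instance name DeviceCGemm_4Gemm_Xdl_CShuffle presumably reflects how the complex product is assembled from four real GEMMs: (A_re + i A_im)(B_re + i B_im) = (A_re B_re - A_im B_im) + i (A_re B_im + A_im B_re). A scalar-level sketch of that decomposition, purely illustrative and not the device code:

```cpp
#include <iostream>

// Illustrative: the same four real products and two combines that a
// "4Gemm" complex GEMM performs at matrix granularity.
int main()
{
    const float a_re = 1.f, a_im = 2.f, b_re = 3.f, b_im = 4.f;
    const float c_re = a_re * b_re - a_im * b_im; // product 1 - product 2
    const float c_im = a_re * b_im + a_im * b_re; // product 3 + product 4
    std::cout << c_re << " + " << c_im << "i\n";  // (1+2i)(3+4i) = -5 + 10i
    return 0;
}
```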
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/22_cgemm/cgemm_xdl_int4.cpp
ADDED
@@ -0,0 +1,140 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>

#include "cgemm_xdl_common.hpp"

#include "ck/library/reference_tensor_operation/cpu/reference_cgemm.hpp"

#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_cgemm_4gemm_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"

using ADataType        = INT4;
using BDataType        = INT4;
using CDataType        = INT4;
using AccDataType      = INT32;
using CShuffleDataType = INT32;

using KernelADataType = INT8;
using KernelBDataType = INT8;
using KernelCDataType = INT8;

using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

using ReferenceCGemmInstance = ck::tensor_operation::host::
    ReferenceCGemm<ADataType, BDataType, CDataType, PassThrough, PassThrough, PassThrough>;

// clang-format off
using DeviceCGemmInstance = ck::tensor_operation::device::DeviceCGemm_4Gemm_Xdl_CShuffle
    <ALayout,          // typename ALayout
     BLayout,          // typename BLayout
     CLayout,          // typename CLayout
     KernelADataType,  // typename ADataType
     KernelBDataType,  // typename BDataType
     KernelCDataType,  // typename CDataType
     AccDataType,      // typename GemmAccDataType
     CShuffleDataType, // typename CShuffleDataType
     PassThrough,      // typename AElementwiseOperation
     PassThrough,      // typename BElementwiseOperation
     PassThrough,      // typename CElementwiseOperation
     GemmDefault,      // GemmSpecialization GemmSpec
     1,                // index_t NumGemmKPrefetchStage
     256,              // index_t BlockSize
     256,              // index_t MPerBlock
     128,              // index_t NPerBlock
     64,               // index_t KPerBlock
     16,               // index_t AK1
     16,               // index_t BK1
     32,               // index_t MPerXDL
     32,               // index_t NPerXDL
     4,                // index_t MXdlPerWave
     2,                // index_t NXdlPerWave
     S<4, 64, 1>,      // typename ABlockTransferThreadClusterLengths_AK0_M_AK1
     S<1, 0, 2>,       // typename ABlockTransferThreadClusterArrangeOrder
     S<1, 0, 2>,       // typename ABlockTransferSrcAccessOrder
     2,                // index_t ABlockTransferSrcVectorDim
     16,               // index_t ABlockTransferSrcScalarPerVector
     16,               // index_t ABlockTransferDstScalarPerVector_AK1
     1,                // index_t ABlockLdsExtraM
     S<4, 64, 1>,      // typename BBlockTransferThreadClusterLengths_BK0_N_BK1
     S<1, 0, 2>,       // typename BBlockTransferThreadClusterArrangeOrder
     S<1, 0, 2>,       // typename BBlockTransferSrcAccessOrder
     2,                // index_t BBlockTransferSrcVectorDim
     8,                // index_t BBlockTransferSrcScalarPerVector
     8,                // index_t BBlockTransferDstScalarPerVector_BK1
     1,                // index_t BBlockLdsExtraN
     1,                // index_t CShuffleMXdlPerWavePerShuffle
     1,                // index_t CShuffleNXdlPerWavePerShuffle
     S<1, 64, 1, 4>,   // typename CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock
     16>;              // index_t CShuffleBlockTransferScalarPerVector_NPerBlock
// clang-format on

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = true;

    // CGEMM shape
    ck::index_t M = 1024;
    ck::index_t N = 1152;
    ck::index_t K = 512;

    ck::index_t StrideA = K;
    ck::index_t StrideB = K;
    ck::index_t StrideC = N;

    if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 10)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideC = std::stoi(argv[9]);
    }
    else
    {
        std::cout << "arg1: verification (0=no, 1=yes)\n"
                  << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
                  << "arg3: time kernel (0=no, 1=yes)\n"
                  << "arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC\n"
                  << std::endl;
        exit(EXIT_SUCCESS);
    }

    return !run_cgemm_xdl<ADataType,
                          BDataType,
                          CDataType,
                          ALayout,
                          BLayout,
                          CLayout,
                          PassThrough,
                          PassThrough,
                          PassThrough,
                          DeviceCGemmInstance,
                          ReferenceCGemmInstance,
                          KernelADataType,
                          KernelBDataType,
                          KernelCDataType>(
        M, N, K, StrideA, StrideB, StrideC, do_verification, init_method, time_kernel);
}
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/22_cgemm/cgemm_xdl_int8.cpp
ADDED
@@ -0,0 +1,132 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>

#include "cgemm_xdl_common.hpp"

#include "ck/library/reference_tensor_operation/cpu/reference_cgemm.hpp"

#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_cgemm_4gemm_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"

using ADataType   = INT8;
using BDataType   = INT8;
using CDataType   = INT8;
using AccDataType = INT32;

using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

using ReferenceCGemmInstance = ck::tensor_operation::host::
    ReferenceCGemm<ADataType, BDataType, CDataType, PassThrough, PassThrough, PassThrough>;

// clang-format off
using DeviceCGemmInstance = ck::tensor_operation::device::DeviceCGemm_4Gemm_Xdl_CShuffle
    <ALayout,        // typename ALayout
     BLayout,        // typename BLayout
     CLayout,        // typename CLayout
     ADataType,      // typename ADataType
     BDataType,      // typename BDataType
     CDataType,      // typename CDataType
     AccDataType,    // typename GemmAccDataType
     CDataType,      // typename CShuffleDataType
     PassThrough,    // typename AElementwiseOperation
     PassThrough,    // typename BElementwiseOperation
     PassThrough,    // typename CElementwiseOperation
     GemmDefault,    // GemmSpecialization GemmSpec
     1,              // index_t NumGemmKPrefetchStage
     256,            // index_t BlockSize
     256,            // index_t MPerBlock
     128,            // index_t NPerBlock
     64,             // index_t KPerBlock
     16,             // index_t AK1
     16,             // index_t BK1
     32,             // index_t MPerXDL
     32,             // index_t NPerXDL
     4,              // index_t MXdlPerWave
     2,              // index_t NXdlPerWave
     S<4, 64, 1>,    // typename ABlockTransferThreadClusterLengths_AK0_M_AK1
     S<1, 0, 2>,     // typename ABlockTransferThreadClusterArrangeOrder
     S<1, 0, 2>,     // typename ABlockTransferSrcAccessOrder
     2,              // index_t ABlockTransferSrcVectorDim
     16,             // index_t ABlockTransferSrcScalarPerVector
     16,             // index_t ABlockTransferDstScalarPerVector_AK1
     1,              // index_t ABlockLdsExtraM
     S<4, 64, 1>,    // typename BBlockTransferThreadClusterLengths_BK0_N_BK1
     S<1, 0, 2>,     // typename BBlockTransferThreadClusterArrangeOrder
     S<1, 0, 2>,     // typename BBlockTransferSrcAccessOrder
     2,              // index_t BBlockTransferSrcVectorDim
     8,              // index_t BBlockTransferSrcScalarPerVector
     8,              // index_t BBlockTransferDstScalarPerVector_BK1
     1,              // index_t BBlockLdsExtraN
     1,              // index_t CShuffleMXdlPerWavePerShuffle
     1,              // index_t CShuffleNXdlPerWavePerShuffle
     S<1, 64, 1, 4>, // typename CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock
     16>;            // index_t CShuffleBlockTransferScalarPerVector_NPerBlock
// clang-format on

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // CGEMM shape
    ck::index_t M = 3840;
    ck::index_t N = 4096;
    ck::index_t K = 4096;

    ck::index_t StrideA = 4096;
    ck::index_t StrideB = 4096;
    ck::index_t StrideC = 4096;

    if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 10)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideC = std::stoi(argv[9]);
    }
    else
    {
        std::cout << "arg1: verification (0=no, 1=yes)\n"
                  << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
                  << "arg3: time kernel (0=no, 1=yes)\n"
                  << "arg4 to 9: M (256x), N(128x), K(32x), StrideA, StrideB, StrideC\n"
                  << std::endl;
        exit(0);
    }

    return !run_cgemm_xdl<ADataType,
                          BDataType,
                          CDataType,
                          ALayout,
                          BLayout,
                          CLayout,
                          PassThrough,
                          PassThrough,
                          PassThrough,
                          DeviceCGemmInstance,
                          ReferenceCGemmInstance>(
        M, N, K, StrideA, StrideB, StrideC, do_verification, init_method, time_kernel);
}
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/27_layernorm2d_fwd/common.hpp
ADDED
@@ -0,0 +1,22 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <getopt.h>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_normalization_fwd_impl.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_normalization_fwd_splitk_impl.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_common_util.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_layernorm.hpp"
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/27_layernorm2d_fwd/layernorm2d_fwd_fp16.cpp
ADDED
@@ -0,0 +1,44 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "common.hpp"

using XDataType              = ck::half_t;
using GammaDataType          = ck::half_t;
using BetaDataType           = ck::half_t;
using YDataType              = ck::half_t;
using SaveMeanInvStdDataType = float;
using ComputeDataType        = float;
using PassThrough            = ck::tensor_operation::element_wise::PassThrough;

#define SAVE_MEAN_INV_STD

constexpr int Rank         = 2;
constexpr int NumReduceDim = 1;

using DeviceInstance =
    ck::tensor_operation::device::DeviceNormalizationFwdImpl<XDataType,
                                                             GammaDataType,
                                                             BetaDataType,
                                                             ComputeDataType,
                                                             YDataType,
                                                             SaveMeanInvStdDataType,
                                                             PassThrough,
                                                             Rank,
                                                             NumReduceDim,
                                                             256, // BlockSize
                                                             8,   // ClusterM
                                                             32,  // ClusterK
                                                             1,   // SliceM
                                                             8,   // SliceK
                                                             1,   // XYVectorDim (0=M, 1=K)
                                                             8,   // SrcScalarPerVector
                                                             1,   // GammaVecDim (0=M, 1=K)
                                                             8,   // GammaScalarPerVector
                                                             1,   // BetaVecDim (0=M, 1=K)
                                                             8,   // BetaScalarPerVector
                                                             8,   // YScalarPerVector
                                                             1>;  // SaveMeanInvStdScalarPerVector
#include "run_layernorm_example.inc"

int main() { return run_layernorm2d_fwd_example<DeviceInstance>(); }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/27_layernorm2d_fwd/layernorm2d_fwd_splitk_fp16.cpp
ADDED
@@ -0,0 +1,45 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "common.hpp"

using XDataType              = ck::half_t;
using GammaDataType          = ck::half_t;
using BetaDataType           = ck::half_t;
using YDataType              = ck::half_t;
using SaveMeanInvStdDataType = float;
using ComputeDataType        = float;
using PassThrough            = ck::tensor_operation::element_wise::PassThrough;

#define SAVE_MEAN_INV_STD

constexpr int Rank         = 2;
constexpr int NumReduceDim = 1;

using DeviceInstance = ck::tensor_operation::device::DeviceNormalizationFwdSplitKImpl<
    XDataType,
    GammaDataType,
    BetaDataType,
    ComputeDataType,
    YDataType,
    SaveMeanInvStdDataType,
    PassThrough,
    Rank,
    NumReduceDim,
    256, // BlockSize
    8,   // ClusterM
    32,  // ClusterK
    1,   // SliceM
    8,   // SliceK
    1,   // XYVectorDim (0=M, 1=K)
    8,   // XScalarPerVector
    1,   // GammaVecDim (0=M, 1=K)
    8,   // GammaScalarPerVector
    1,   // BetaVecDim (0=M, 1=K)
    8,   // BetaScalarPerVector
    8,   // YScalarPerVector
    1>;  // SaveMeanInvStdScalarPerVector

#include "run_layernorm_example.inc"

int main() { return run_layernorm2d_fwd_example<DeviceInstance>(); }
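Both layer-norm variants normalize each row of a 2-D tensor over its reduce dimension and, with SAVE_MEAN_INV_STD defined, also store the per-row mean and inverse standard deviation. In scalar form the computation is y = gamma * (x - E[x]) / sqrt(Var[x] + eps) + beta. A minimal host-side sketch of one row follows; it is illustrative only and independent of the CK device instances above:

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    const float x[4]  = {1.f, 2.f, 3.f, 4.f};
    const float gamma = 1.f, beta = 0.f, eps = 1e-5f;

    // One pass accumulates E[x] and E[x^2].
    float mean = 0.f, meansq = 0.f;
    for(float v : x) { mean += v; meansq += v * v; }
    mean /= 4.f;
    meansq /= 4.f;

    const float var     = meansq - mean * mean; // Var[x] = E[x^2] - E[x]^2
    const float inv_std = 1.f / std::sqrt(var + eps);

    for(float v : x)
        std::printf("%f ", gamma * (v - mean) * inv_std + beta);
    std::printf("\n");
    return 0;
}
```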
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_fp32.cpp
ADDED
@@ -0,0 +1,135 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

/*
Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
                                              |------------|
                                                  Gemm0
                                              |---------------------|
                                                       Gemm1
*/

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_batched_gemm_gemm_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

using ADataType        = F32;
using B0DataType       = F32;
using B1DataType       = F32;
using AccDataType      = F32;
using CShuffleDataType = F32;
using CDataType        = F32;

using ALayout  = Row;
using B0Layout = Col;
using B1Layout = Row;
using CLayout  = Row;

using AElementOp    = PassThrough;
using B0ElementOp   = PassThrough;
using Acc0ElementOp = PassThrough;
using B1ElementOp   = PassThrough;
using CElementOp    = PassThrough;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

using DeviceGemmInstance = ck::tensor_operation::device::DeviceBatchedGemmGemm_Xdl_CShuffle<
    ALayout,
    B0Layout,
    B1Layout,
    CLayout,
    ADataType,
    B0DataType,
    B1DataType,
    CDataType,
    AccDataType,
    CShuffleDataType,
    AElementOp,
    B0ElementOp,
    Acc0ElementOp,
    B1ElementOp,
    CElementOp,
    GemmDefault,
    1,
    256,
    128,             // MPerBlock
    128,             // NPerBlock
    16,              // KPerBlock
    128,             // Gemm1NPerBlock
    16,              // Gemm1KPerBlock
    4,               // AK1
    4,               // BK1
    1,               // B1K1
    32,              // MPerXDL
    32,              // NPerXDL
    1,               // MXdlPerWave
    4,               // NXdlPerWave
    4,               // Gemm1NXdlPerWave
    S<4, 64, 1>,     // ABlockTransfer
    S<1, 0, 2>,
    S<1, 0, 2>,
    2,
    4,
    4,
    true,
    S<4, 64, 1>,     // BBlockTransfer
    S<1, 0, 2>,
    S<1, 0, 2>,
    2,
    4,
    4,
    true,
    S<8, 32, 1>,     // B1BlockTransfer
    S<0, 2, 1>,
    S<0, 2, 1>,
    1,
    4,
    1,
    false,
    1,               // CShuffleMXdlPerWavePerShuffle
    2,               // CShuffleNXdlPerWavePerShuffle
    S<1, 16, 1, 16>, // CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock
    4>;              // CShuffleBlockTransferScalarPerVector_NPerBlock

using ReferenceGemm0Instance = ck::tensor_operation::host::ReferenceBatchedGemm<ADataType,
                                                                                B0DataType,
                                                                                ADataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                B0ElementOp,
                                                                                CElementOp>;

using ReferenceGemm1Instance = ck::tensor_operation::host::ReferenceBatchedGemm<ADataType,
                                                                                B1DataType,
                                                                                CDataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                B1ElementOp,
                                                                                CElementOp>;

#include "run_batched_gemm_gemm_example.inc"

int main(int argc, char* argv[]) { return run_batched_gemm_gemm_example(argc, argv) ? 0 : 1; }
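The fused operation computes C = (A * B0) * B1 without materializing the intermediate in global memory. As a naive host-side sketch of the same math (plain C++, independent of CK; all layouts here are row-major for brevity, whereas the example above keeps B0 column-major):

```cpp
#include <cstddef>
#include <vector>

// Naive reference: C[m][o] = sum_n ( sum_k A[m][k] * B0[k][n] ) * B1[n][o].
std::vector<float> gemm_gemm(const std::vector<float>& A,  // M x K, row-major
                             const std::vector<float>& B0, // K x N, row-major
                             const std::vector<float>& B1, // N x O, row-major
                             std::size_t M, std::size_t K, std::size_t N, std::size_t O)
{
    std::vector<float> Acc(M * N, 0.f), C(M * O, 0.f);
    for(std::size_t m = 0; m < M; ++m)          // Gemm0: Acc = A * B0
        for(std::size_t n = 0; n < N; ++n)
            for(std::size_t k = 0; k < K; ++k)
                Acc[m * N + n] += A[m * K + k] * B0[k * N + n];
    for(std::size_t m = 0; m < M; ++m)          // Gemm1: C = Acc * B1
        for(std::size_t o = 0; o < O; ++o)
            for(std::size_t n = 0; n < N; ++n)
                C[m * O + o] += Acc[m * N + n] * B1[n * O + o];
    return C;
}
```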
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/31_batched_gemm_gemm/batched_gemm_gemm_xdl_int8.cpp
ADDED
@@ -0,0 +1,133 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

/*
Gemm + Gemm fused operation. Computes C_m_o = A_m_k * B0_k_n * B1_n_o
                                              |------------|
                                                  Gemm0
                                              |---------------------|
                                                       Gemm1
*/

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_batched_gemm_gemm_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

using ADataType        = int8_t;
using B0DataType       = int8_t;
using B1DataType       = int8_t;
using AccDataType      = int32_t;
using CShuffleDataType = int32_t;
using CDataType        = int8_t;

using ALayout  = Row;
using B0Layout = Col;
using B1Layout = Row;
using CLayout  = Row;

using AElementOp    = PassThrough;
using B0ElementOp   = PassThrough;
using Acc0ElementOp = PassThrough;
using B1ElementOp   = PassThrough;
using CElementOp    = PassThrough;

static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;

using DeviceGemmInstance = ck::tensor_operation::device::DeviceBatchedGemmGemm_Xdl_CShuffle<
    ALayout,
    B0Layout,
    B1Layout,
    CLayout,
    ADataType,
    B0DataType,
    B1DataType,
    CDataType,
    AccDataType,
    CShuffleDataType,
    AElementOp,
    B0ElementOp,
    Acc0ElementOp,
    B1ElementOp,
    CElementOp,
    GemmDefault,
    1,
    256,
    128,            // MPerBlock
    128,            // NPerBlock
    64,             // KPerBlock
    128,            // Gemm1NPerBlock
    64,             // Gemm1KPerBlock
    16,             // AK1
    16,             // BK1
    4,              // B1K1
    32,             // MPerXDL
    32,             // NPerXDL
    1,              // MXdlPerWave
    4,              // NXdlPerWave
    4,              // Gemm1NXdlPerWave
    S<4, 64, 1>,    // ABlockTransfer
    S<1, 0, 2>,
    S<1, 0, 2>,
    2,
    16,
    16,
    true,
    S<4, 64, 1>,    // BBlockTransfer
    S<1, 0, 2>,
    S<1, 0, 2>,
    2,
    16,
    16,
    true,
    S<8, 32, 1>,    // B1BlockTransfer
    S<0, 2, 1>,
    S<0, 2, 1>,
    1,
    4,
    4,
    false,
    1,              // CShuffleMXdlPerWavePerShuffle
    2,              // CShuffleNXdlPerWavePerShuffle
    S<1, 32, 1, 8>, // CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock
    8>;             // CShuffleBlockTransferScalarPerVector_NPerBlock

using ReferenceGemm0Instance = ck::tensor_operation::host::ReferenceBatchedGemm<ADataType,
                                                                                B0DataType,
                                                                                ADataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                B0ElementOp,
                                                                                CElementOp>;

using ReferenceGemm1Instance = ck::tensor_operation::host::ReferenceBatchedGemm<ADataType,
                                                                                B1DataType,
                                                                                CDataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                B1ElementOp,
                                                                                CElementOp>;

#include "run_batched_gemm_gemm_example.inc"

int main(int argc, char* argv[]) { return run_batched_gemm_gemm_example(argc, argv) ? 0 : 1; }
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/33_multiple_reduce/README.md
ADDED
@@ -0,0 +1,37 @@
# Instructions for ```example_dual_reduce```

## Run ```example_dual_reduce_multiblock```
```bash
# -D <xxx> : input 4-d tensor lengths
# -v <x> : verification (0=no, 1=yes)
#arg1: initialization (0=no init, 1=single integer value, 2=scope integer value, 3=decimal value)
#arg2: time kernel (0=no, 1=yes)
./bin/example_dual_reduce_multiblock -D 600,28,28,256 -v 1 2 1
```

Result
```
./bin/example_dual_reduce_multiblock -D 600,28,28,256 -v 1 2 1
launch_and_time_kernel: grid_dim {150, 1, 1}, block_dim {256, 1, 1}
Warm up 1 time
Start running 10 times...
Perf: 1.19529 ms, 201.499 GB/s, DeviceMultipleReduceBlockWise<256,M_C4_S1,K_C64_S1,InSrcVectorDim_1_InSrcVectorSize_1,OutDstVectorSize_1_1>
```

## Run ```example_dual_reduce_threadwise```
```bash
# -D <xxx> : input 4-d tensor lengths
# -v <x> : verification (0=no, 1=yes)
#arg1: initialization (0=no init, 1=single integer value, 2=scope integer value, 3=decimal value)
#arg2: time kernel (0=no, 1=yes)
./bin/example_dual_reduce_threadwise -D 8000,4,4,4 -v 1 2 1
```

Result
```
./bin/example_dual_reduce_threadwise -D 8000,4,4,4 -v 1 2 1
launch_and_time_kernel: grid_dim {32, 1, 1}, block_dim {256, 1, 1}
Warm up 1 time
Start running 10 times...
Perf: 0.01512 ms, 71.9577 GB/s, DeviceMultipleReduceThreadwise<256,M_C256_S1,K_C1_S4,InSrcVectorDim_1_InSrcVectorSize_2,OutDstVectorSize_1_1>
```
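The GB/s figures above follow the byte accounting in dual_reduce_common.hpp below: the whole input is read once and both outputs are written once. As a quick sanity check of the multiblock number, assuming half-precision input and two float outputs:

```cpp
#include <cstdio>

int main()
{
    // -D 600,28,28,256: reduce over H, W, C; two outputs of length N = 600.
    const double in_bytes  = 600.0 * 28 * 28 * 256 * 2; // half = 2 bytes per element
    const double out_bytes = 2.0 * 600 * 4;             // two float outputs
    const double ms        = 1.19529;                   // kernel time reported above
    std::printf("%.3f GB/s\n", (in_bytes + out_bytes) / 1.0e6 / ms); // ~201.499
    return 0;
}
```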
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/33_multiple_reduce/dual_reduce_common.hpp
ADDED
@@ -0,0 +1,314 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <cstdlib>
#include <vector>
#include <array>
#include <algorithm>
#include <getopt.h>

#include "ck/ck.hpp"
#include "ck/utility/reduction_enums.hpp"
#include "ck/utility/data_type.hpp"

#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/host_common_util.hpp"

static struct option long_options[] = {{"inLengths", required_argument, nullptr, 'D'},
                                       {"verify", required_argument, nullptr, 'v'},
                                       {"help", no_argument, nullptr, '?'},
                                       {nullptr, 0, nullptr, 0}};

class SimpleAppArgs
{
    private:
    int option_index = 0;

    public:
    std::vector<size_t> inLengths = {600, 28, 28, 256};
    size_t n, h, w, c;

    bool do_verification = true;
    int init_method      = 2;
    bool time_kernel     = true;

    public:
    SimpleAppArgs()
    {
        n = inLengths[0];
        h = inLengths[1];
        w = inLengths[2];
        c = inLengths[3];
    };

    void show_usage(const char* cmd)
    {
        std::cout << "Usage of " << cmd << std::endl;
        std::cout << "--inLengths or -D, comma separated list of input tensor dimension lengths"
                  << std::endl;
        std::cout << "--verify or -v, 1/0 to indicate whether to verify the reduction result by "
                     "comparing with the host-based reduction"
                  << std::endl;
        std::cout << "Arg1 -- init method (0=no init, 1=single integer value, 2=scope integer "
                     "value, 3=decimal value)"
                  << std::endl;
        std::cout << "Arg2 -- time kernel (0=no, 1=yes)" << std::endl;
    };

    int processArgs(int argc, char* argv[])
    {
        using ck::host_common::getTypeValuesFromString;

        int ch;

        while(1)
        {
            ch = getopt_long(argc, argv, "D:v:l:", long_options, &option_index);
            if(ch == -1)
                break;
            switch(ch)
            {
            case 'D':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                inLengths = getTypeValuesFromString<size_t>(optarg);
                if(inLengths.size() != 4)
                    throw std::runtime_error(
                        "Invalid option format! The number of integers is incorrect!");

                break;
            case 'v':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                do_verification = static_cast<bool>(std::atoi(optarg));
                break;
            case '?':
                if(std::string(long_options[option_index].name) == "help")
                {
                    show_usage(argv[0]);
                    return (-1);
                };
                break;
            default: show_usage(argv[0]); return (-1);
            };
        };

        if(optind + 2 > argc)
            throw std::runtime_error("Invalid cmd-line arguments, more arguments are needed!");

        init_method = std::atoi(argv[optind++]);
        time_kernel = static_cast<bool>(std::atoi(argv[optind]));

        n = inLengths[0];
        h = inLengths[1];
        w = inLengths[2];
        c = inLengths[3];

        return (0);
    };
};

template <typename InDataType, typename OutDataType1, typename OutDataType2, typename AccDataType>
static void mean_meansquare_host(const Tensor<InDataType>& in,
                                 Tensor<OutDataType1>& mean_ref,
                                 Tensor<OutDataType2>& meansquare_ref,
                                 size_t n,
                                 size_t h,
                                 size_t w,
                                 size_t c)

{
    auto thread_reduce_func = [&](auto iN) {
        AccDataType mean       = ck::type_convert<AccDataType>(0.0f);
        AccDataType meansquare = ck::type_convert<AccDataType>(0.0f);

        // compute mean, meansquare, variance, invVariance
        for(std::size_t iH = 0; iH < h; iH++)
        {
            for(std::size_t iW = 0; iW < w; iW++)
            {
                for(std::size_t iC = 0; iC < c; iC++)
                {
                    AccDataType curr_value = ck::type_convert<AccDataType>(in(iN, iH, iW, iC));

                    mean += curr_value;
                    meansquare += curr_value * curr_value;
                };
            }
        };

        mean       = mean / (h * w * c);
        meansquare = meansquare / (h * w * c);

        mean_ref(iN)       = ck::type_convert<OutDataType1>(mean);
        meansquare_ref(iN) = ck::type_convert<OutDataType2>(meansquare);
    };

    std::size_t num_thread      = std::thread::hardware_concurrency();
    std::size_t work_per_thread = (n + num_thread - 1) / num_thread;

    std::vector<joinable_thread> threads(num_thread);

    for(std::size_t it = 0; it < num_thread; it++)
    {
        std::size_t iN_begin = it * work_per_thread;
        std::size_t iN_end   = std::min(static_cast<size_t>((it + 1) * work_per_thread), n);

        auto f = [=] {
            for(std::size_t iN = iN_begin; iN < iN_end; iN++)
            {
                thread_reduce_func(iN);
            }
        };

        threads[it] = joinable_thread(f);
    }
};

using ReduceOperation = ck::reduce::Add;

using InElementwiseOperation_Mean  = ck::tensor_operation::element_wise::PassThrough;
using AccElementwiseOperation_Mean = ck::tensor_operation::element_wise::UnaryDivide;

using InElementwiseOperation_Meansquare  = ck::tensor_operation::element_wise::UnarySquare;
using AccElementwiseOperation_Meansquare = ck::tensor_operation::element_wise::UnaryDivide;

using InElementwiseOperationTuple =
    ck::Tuple<InElementwiseOperation_Mean, InElementwiseOperation_Meansquare>;
using AccElementwiseOperationTuple =
    ck::Tuple<AccElementwiseOperation_Mean, AccElementwiseOperation_Meansquare>;

template <typename DeviceDualReduce,
          typename InDataType,
          typename OutDataType,
          typename AccDataType,
          int Rank,
          int NumReduceDim>
int mean_meansquare_dual_reduce_test(size_t n,
                                     size_t h,
                                     size_t w,
                                     size_t c,
                                     bool do_verification,
                                     int init_method,
                                     bool time_kernel,
                                     const std::array<int, NumReduceDim> reduceDims)
{
    const std::vector<size_t> inLengths = {n, h, w, c};

    Tensor<InDataType> in(inLengths);

    std::vector<size_t> outLengths{n};

    Tensor<OutDataType> mean_ref(outLengths);
    Tensor<OutDataType> mean(outLengths);
    Tensor<OutDataType> meansquare_ref(outLengths);
    Tensor<OutDataType> meansquare(outLengths);

    auto inStrides  = in.mDesc.GetStrides();
    auto outStrides = mean.mDesc.GetStrides();

    size_t invariant_total_length = n;
    size_t reduce_total_length    = h * w * c;

    const double alpha = 1.0f;
    const double beta  = 0.0f;

    std::size_t num_thread = 1;

    if(do_verification)
    {
        switch(init_method)
        {
        case 0: break;
        case 1: in.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread); break;
        case 2: in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread); break;
        default: in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
        }
    };

    // these buffers are usually provided by the user application
    DeviceMem in_dev(sizeof(InDataType) * in.mDesc.GetElementSpaceSize());
    DeviceMem mean_dev(sizeof(OutDataType) * mean.mDesc.GetElementSpaceSize());
    DeviceMem meansquare_dev(sizeof(OutDataType) * meansquare.mDesc.GetElementSpaceSize());

    in_dev.ToDevice(in.mData.data());

    if(do_verification)
    {
        mean_meansquare_host<InDataType, OutDataType, OutDataType, AccDataType>(
            in, mean_ref, meansquare_ref, n, h, w, c);
    };

    constexpr ck::index_t NumInputDim  = Rank;
    constexpr ck::index_t NumOutputDim = (Rank - NumReduceDim > 1) ? Rank - NumReduceDim : 1;

    std::array<ck::index_t, NumInputDim> i_inLengths;
    std::array<ck::index_t, NumInputDim> i_inStrides;
    std::array<ck::index_t, NumOutputDim> i_outLengths;
    std::array<ck::index_t, NumOutputDim> i_outStrides;

    ck::ranges::copy(inLengths, i_inLengths.begin());
    ck::ranges::copy(inStrides, i_inStrides.begin());
    ck::ranges::copy(outLengths, i_outLengths.begin());
    ck::ranges::copy(outStrides, i_outStrides.begin());

    auto dual_reduce_op = DeviceDualReduce{};

    auto argument_ptr = dual_reduce_op.MakeArgumentPointer(
        i_inLengths,
        i_inStrides,
        i_outLengths,
        {i_outStrides, i_outStrides},
        reduceDims,
        {alpha, alpha},
        {beta, beta},
        in_dev.GetDeviceBuffer(),
        {mean_dev.GetDeviceBuffer(), meansquare_dev.GetDeviceBuffer()},
        ck::make_tuple(InElementwiseOperation_Mean{}, InElementwiseOperation_Meansquare{}),
        ck::make_tuple(
            AccElementwiseOperation_Mean{static_cast<int32_t>(reduce_total_length)},
            AccElementwiseOperation_Meansquare{static_cast<int32_t>(reduce_total_length)}));

    if(!dual_reduce_op.IsSupportedArgument(argument_ptr.get()))
    {
        std::cout
            << "The runtime parameters seem not to be supported by the DeviceReduce instance, exiting!"
            << std::endl;
        return (-1);
    };

    std::string reduce_name = dual_reduce_op.GetTypeString();

    auto invoker_ptr = dual_reduce_op.MakeInvokerPointer();

    float avg_time = 0.0f;

    avg_time += invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

    std::size_t num_bytes = invariant_total_length * reduce_total_length * sizeof(InDataType) +
                            2 * invariant_total_length * sizeof(OutDataType);

    float gb_per_sec = num_bytes / 1.E6 / avg_time;

    std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s, " << reduce_name
              << std::endl;

    bool pass = true;

    if(do_verification)
    {
        mean_dev.FromDevice(mean.mData.data());
        meansquare_dev.FromDevice(meansquare.mData.data());
        pass = pass && ck::utils::check_err(mean, mean_ref);
        pass = pass && ck::utils::check_err(meansquare, meansquare_ref);
    };

    return (pass ? 0 : 1);
}
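The dual reduce drives both outputs from a single pass over the input: the first tuple entry reduces x with Add and then divides by the reduce length (mean), while the second squares each element before the same Add and divide (mean of squares). A host-side sketch of that per-element/post-accumulation operator pipeline, illustrative and independent of CK:

```cpp
#include <cstdio>

// Illustrative pipeline: in_op is applied per element, Add accumulates,
// and acc_op is applied once to the accumulated value (here: divide by count).
template <typename InOp, typename AccOp>
float reduce_add(const float* x, int n, InOp in_op, AccOp acc_op)
{
    float acc = 0.f;
    for(int i = 0; i < n; ++i)
        acc += in_op(x[i]);
    return acc_op(acc);
}

int main()
{
    const float x[4]  = {1.f, 2.f, 3.f, 4.f};
    auto pass_through = [](float v) { return v; };       // like InElementwiseOperation_Mean
    auto square       = [](float v) { return v * v; };   // like InElementwiseOperation_Meansquare
    auto divide_by_n  = [](float v) { return v / 4.f; }; // like UnaryDivide with the reduce length

    std::printf("mean=%f meansquare=%f\n",
                reduce_add(x, 4, pass_through, divide_by_n),
                reduce_add(x, 4, square, divide_by_n));
    return 0;
}
```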
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/33_multiple_reduce/dual_reduce_multiblock.cpp
ADDED
@@ -0,0 +1,98 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <cstdlib>
#include <vector>
#include <array>
#include <algorithm>
#include <getopt.h>

#include "ck/ck.hpp"
#include "ck/utility/reduction_enums.hpp"
#include "ck/utility/data_type.hpp"

#include "ck/tensor_operation/gpu/device/device_base.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_multiple_reduce_multiblock.hpp"
#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"

#include "dual_reduce_common.hpp"

using namespace ck;
using namespace ck::tensor_operation::device;

using InDataType       = ck::half_t;
using OutDataType      = float;
using OutDataTypeTuple = Tuple<OutDataType, OutDataType>;
using AccDataType      = float;

// for NHWC layer-norm calculation of mean and meansquare
constexpr int Rank         = 4;
constexpr int NumReduceDim = 3;

constexpr bool PropagateNan = false;

constexpr InMemoryDataOperationEnum OutMemoryDataOperation = InMemoryDataOperationEnum::Set;

using DeviceDualReduce = DeviceMultipleReduceMultiBlock<2,
                                                        InDataType,
                                                        AccDataType,
                                                        OutDataTypeTuple,
                                                        Rank,
                                                        NumReduceDim,
                                                        ReduceOperation,
                                                        InElementwiseOperationTuple,
                                                        AccElementwiseOperationTuple,
                                                        OutMemoryDataOperation,
                                                        PropagateNan,
                                                        256,
                                                        4,
                                                        64,
                                                        1,
                                                        1,
                                                        1, // InSrcVectorDim
                                                        1,
                                                        ck::Sequence<1, 1>>;

int main(int argc, char* argv[])
{
    int retval = 0;

    if(argc > 1)
    {
        SimpleAppArgs arg;

        if(arg.processArgs(argc, argv) < 0)
            return (-1);

        std::array<int, NumReduceDim> reduceDims = {1, 2, 3};

        retval = mean_meansquare_dual_reduce_test<DeviceDualReduce,
                                                  InDataType,
                                                  OutDataType,
                                                  AccDataType,
                                                  Rank,
                                                  NumReduceDim>(arg.n,
                                                                arg.h,
                                                                arg.w,
                                                                arg.c,
                                                                arg.do_verification,
                                                                arg.init_method,
                                                                arg.time_kernel,
                                                                reduceDims);
    }
    else
    {
        std::array<int, NumReduceDim> reduceDims = {1, 2, 3};

        retval = mean_meansquare_dual_reduce_test<DeviceDualReduce,
                                                  InDataType,
                                                  OutDataType,
                                                  AccDataType,
                                                  Rank,
                                                  NumReduceDim>(
            600, 28, 28, 256, true, 2, true, reduceDims);
    };

    return (retval);
}
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/33_multiple_reduce/dual_reduce_threadwise.cpp
ADDED
@@ -0,0 +1,93 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <cstdlib>
#include <vector>
#include <array>
#include <algorithm>
#include <getopt.h>

#include "ck/ck.hpp"
#include "ck/utility/reduction_enums.hpp"
#include "ck/utility/data_type.hpp"

#include "ck/tensor_operation/gpu/device/device_base.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_multiple_reduce_threadwise.hpp"
#include "ck/tensor_operation/gpu/device/reduction_operator_mapping.hpp"

#include "dual_reduce_common.hpp"

using namespace ck;
using namespace ck::tensor_operation::device;

using InDataType       = ck::half_t;
using OutDataType      = float;
using OutDataTypeTuple = Tuple<OutDataType, OutDataType>;
using AccDataType      = float;

// for NHWC layer-norm calculation of mean and meansquare
constexpr int Rank         = 4;
constexpr int NumReduceDim = 3;

constexpr bool PropagateNan = false;

using DeviceDualReduce = DeviceMultipleReduceThreadWise<2,
                                                        InDataType,
                                                        AccDataType,
                                                        OutDataTypeTuple,
                                                        Rank,
                                                        NumReduceDim,
                                                        ReduceOperation,
                                                        InElementwiseOperationTuple,
                                                        AccElementwiseOperationTuple,
                                                        PropagateNan,
                                                        256,
                                                        1,
                                                        4,
                                                        1, // InSrcVectorDim
                                                        2,
                                                        ck::Sequence<1, 1>>;

int main(int argc, char* argv[])
{
    int retval = 0;

    if(argc > 1)
    {
        SimpleAppArgs arg;

        if(arg.processArgs(argc, argv) < 0)
            return (-1);

        std::array<int, NumReduceDim> reduceDims = {1, 2, 3};

        retval = mean_meansquare_dual_reduce_test<DeviceDualReduce,
                                                  InDataType,
                                                  OutDataType,
                                                  AccDataType,
                                                  Rank,
                                                  NumReduceDim>(arg.n,
                                                                arg.h,
                                                                arg.w,
                                                                arg.c,
                                                                arg.do_verification,
                                                                arg.init_method,
                                                                arg.time_kernel,
                                                                reduceDims);
    }
    else
    {
        std::array<int, NumReduceDim> reduceDims = {1, 2, 3};

        retval = mean_meansquare_dual_reduce_test<DeviceDualReduce,
                                                  InDataType,
                                                  OutDataType,
                                                  AccDataType,
                                                  Rank,
                                                  NumReduceDim>(
            8000, 4, 4, 4, true, 2, true, reduceDims);
    };

    return (retval);
}
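The two reductions this example fuses are exactly the statistics a layer-norm pass needs: reducing dims {1, 2, 3} of an NHWC tensor yields a per-sample mean and mean-square, from which the variance follows as E[x^2] - E[x]^2. A minimal host-side sketch of that relationship (the naive_mean_meansquare helper below is illustrative only, not part of the example):

#include <cstddef>
#include <vector>

// Hypothetical host reference: reduce dims {1,2,3} of an NHWC tensor to a
// per-sample mean and mean-square, the two outputs of the dual reduce.
void naive_mean_meansquare(const std::vector<float>& x,
                           int n, int h, int w, int c,
                           std::vector<float>& mean,
                           std::vector<float>& meansquare)
{
    const std::size_t len = std::size_t(h) * w * c;
    for(int i = 0; i < n; ++i)
    {
        double sum = 0.0, sumsq = 0.0;
        for(std::size_t j = 0; j < len; ++j)
        {
            const double v = x[i * len + j];
            sum += v;
            sumsq += v * v;
        }
        mean[i]       = static_cast<float>(sum / len);
        meansquare[i] = static_cast<float>(sumsq / len);
        // layer-norm variance would then be meansquare[i] - mean[i]*mean[i]
    }
}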
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/52_im2col_col2im/common.hpp
ADDED
@@ -0,0 +1,97 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <cstdlib>
#include <initializer_list>
#include <iostream>
#include <numeric>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_image_to_column_impl.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_column_to_image_impl.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"

#include "ck/library/utility/algorithm.hpp"
#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
#include "ck/library/utility/convolution_parameter.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_image_to_column.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_column_to_image.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

static inline constexpr ck::index_t NDimSpatial = 2;

using FP32 = float;

struct ExecutionConfig final
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;
};

#define DefaultConvParams                                                            \
    ck::utils::conv::ConvParam                                                       \
    {                                                                                \
        NDimSpatial, 1, 32, 1, 1, {4, 4}, {64, 64}, {1, 1}, {1, 1}, {0, 0}, { 0, 0 } \
    }

inline void print_help_msg()
{
    std::cerr << "arg1: verification (0=no, 1=yes)\n"
              << "arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n"
              << "arg3: time kernel (0=no, 1=yes)\n"
              << ck::utils::conv::get_conv_param_parser_helper_msg() << std::endl;
}

inline bool parse_cmd_args(int argc,
                           char* argv[],
                           ExecutionConfig& config,
                           ck::utils::conv::ConvParam& conv_params)
{
    constexpr int num_execution_config_args =
        3; // arguments for do_verification, init_method, time_kernel
    constexpr int num_conv_param_leading_args = 5; // arguments for num_dim_spatial_, G_, N_, K_, C_

    constexpr int threshold_to_catch_partial_args = 1 + num_execution_config_args;
    constexpr int threshold_to_catch_all_args =
        threshold_to_catch_partial_args + num_conv_param_leading_args;

    if(argc == 1)
    {
        // use default
        config = ExecutionConfig{};
    }
    // catch only ExecutionConfig arguments
    else if(argc == threshold_to_catch_partial_args)
    {
        config.do_verification = std::stoi(argv[1]);
        config.init_method     = std::stoi(argv[2]);
        config.time_kernel     = std::stoi(argv[3]);
    }
    // catch both ExecutionConfig & ConvParam arguments
    else if(threshold_to_catch_all_args < argc && ((argc - threshold_to_catch_all_args) % 3 == 0))
    {
        config.do_verification = std::stoi(argv[1]);
        config.init_method     = std::stoi(argv[2]);
        config.time_kernel     = std::stoi(argv[3]);

        const ck::index_t num_dim_spatial = std::stoi(argv[4]);
        conv_params                       = ck::utils::conv::parse_conv_param(
            num_dim_spatial, threshold_to_catch_partial_args, argv);
    }
    else
    {
        print_help_msg();
        return false;
    }

    return true;
}
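As a worked example of the shapes this header sets up: with DefaultConvParams (assuming the field order matches the parser comment above: num_dim_spatial, G, N, K, C, filter, input, strides, dilations, pads), a 4x4 filter sliding over a 64x64 input at stride 1 with no padding produces a 61x61 output, so the im2col GEMM matrix is [G, N*Ho*Wo, C*Y*X] = [1, 32*61*61, 16]. A small self-contained check of that arithmetic:

#include <iostream>

// Standard convolution output-length arithmetic used to size the im2col GEMM:
// out = (in + pad_l + pad_r - dilation*(filter-1) - 1) / stride + 1.
int conv_out_length(int in, int filter, int stride, int dilation, int pad_l, int pad_r)
{
    return (in + pad_l + pad_r - dilation * (filter - 1) - 1) / stride + 1;
}

int main()
{
    // Values taken from DefaultConvParams: 64x64 input, 4x4 filter, stride 1,
    // dilation 1, no padding, G=1, N=32, C=1 (assumed field order).
    const int ho = conv_out_length(64, 4, 1, 1, 0, 0); // 61
    const int wo = conv_out_length(64, 4, 1, 1, 0, 0); // 61
    std::cout << "GEMM shape: [G, N*Ho*Wo, C*Y*X] = [1, " << 32 * ho * wo << ", "
              << 1 * 4 * 4 << "]\n"; // [1, 119072, 16]
    return 0;
}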
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/52_im2col_col2im/image_to_column_f32.cpp
ADDED
@@ -0,0 +1,168 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include "common.hpp"

using InDataType  = FP32;
using OutDataType = FP32;

using ImLayout        = ck::tensor_layout::convolution::GNHWC;
using ImageToColumnOp = ck::conv_tensor_rearrange_op::ImageToColumn;

// clang-format off
using DeviceImgToColInstance = ck::tensor_operation::device::DeviceImageToColumnImpl
//#####################|     Num| ImLayout| InDataType| OutDataType| Block|  MPer|  KPer|    Thread| Scalar|
//#####################|     Dim|         |           |            |  Size| Block| Block|   Cluster|    Per|
//#####################| Spatial|         |           |            |      |      |      |   Lengths| Vector|
//#####################|        |         |           |            |      |      |      |          |       |
        < NDimSpatial, ImLayout, InDataType, OutDataType, 256, 128, 128, S<16, 16>, 1>;
// clang-format on

bool RunImageToColumn(const ExecutionConfig& config, const ck::utils::conv::ConvParam& conv_params)
{
    const auto G = conv_params.G_;
    const auto N = conv_params.N_;
    const auto C = conv_params.C_;

    const ck::index_t NDoHoWo =
        N * ck::accumulate_n<ck::index_t>(
                conv_params.output_spatial_lengths_.begin(), NDimSpatial, 1, std::multiplies<>());
    const ck::index_t CZYX =
        C * ck::accumulate_n<ck::index_t>(
                conv_params.filter_spatial_lengths_.begin(), NDimSpatial, 1, std::multiplies<>());

    const auto in_desc =
        ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<ImLayout>(conv_params);
    const auto out_desc = HostTensorDescriptor({G, NDoHoWo, CZYX});

    std::array<ck::index_t, NDimSpatial> input_spatial_lengths{};
    std::array<ck::index_t, NDimSpatial> filter_spatial_lengths{};
    std::array<ck::index_t, NDimSpatial> output_spatial_lengths{};
    std::array<ck::index_t, NDimSpatial + 3> image_g_n_c_wis_strides{};
    std::array<ck::index_t, 3> gemm_g_m_k_strides{};
    std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
    std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
    std::array<ck::index_t, NDimSpatial> input_left_pads{};
    std::array<ck::index_t, NDimSpatial> input_right_pads{};

    auto copy = [](const auto& x, auto& y) { std::copy(x.begin(), x.end(), y.begin()); };

    copy(conv_params.input_spatial_lengths_, input_spatial_lengths);
    copy(conv_params.filter_spatial_lengths_, filter_spatial_lengths);
    copy(conv_params.output_spatial_lengths_, output_spatial_lengths);
    copy(in_desc.GetStrides(), image_g_n_c_wis_strides);
    copy(out_desc.GetStrides(), gemm_g_m_k_strides);
    copy(conv_params.conv_filter_strides_, conv_filter_strides);
    copy(conv_params.conv_filter_dilations_, conv_filter_dilations);
    copy(conv_params.input_left_pads_, input_left_pads);
    copy(conv_params.input_right_pads_, input_right_pads);

    Tensor<InDataType> in(in_desc);
    Tensor<OutDataType> out_device(out_desc);
    Tensor<OutDataType> out_host(out_desc);

    std::cout << "in: " << in.mDesc << std::endl;
    std::cout << "out: " << out_device.mDesc << std::endl;

    switch(config.init_method)
    {
    case 0: break;
    case 1: in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}); break;
    default: in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * in.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * out_device.mDesc.GetElementSpaceSize());

    in_device_buf.ToDevice(in.mData.data());

    // reset output buffer to zero
    out_device_buf.SetZero();

    static_assert(std::is_default_constructible_v<DeviceImgToColInstance>);

    // do im2col
    auto img2col  = DeviceImgToColInstance{};
    auto invoker  = img2col.MakeInvoker();
    auto argument = img2col.MakeArgument(in_device_buf.GetDeviceBuffer(),
                                         out_device_buf.GetDeviceBuffer(),
                                         G,
                                         N,
                                         C,
                                         input_spatial_lengths,
                                         filter_spatial_lengths,
                                         output_spatial_lengths,
                                         image_g_n_c_wis_strides,
                                         gemm_g_m_k_strides,
                                         conv_filter_strides,
                                         conv_filter_dilations,
                                         input_left_pads,
                                         input_right_pads);

    if(!img2col.IsSupportedArgument(argument))
    {
        std::cerr << "wrong! device_img2col with the specified compilation parameters does "
                     "not support this img2col problem"
                  << std::endl;

        return false;
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, config.time_kernel});
    std::size_t num_btype = G * NDoHoWo * CZYX * (sizeof(OutDataType) + sizeof(InDataType));
    float gb_per_sec = num_btype / 1.E6 / ave_time;
    std::cout << "Perf: " << ave_time << " ms, " << gb_per_sec << " GB/s" << std::endl;

    if(config.do_verification)
    {
        auto ref_image_to_column = ck::tensor_operation::host::
            ReferenceImageToColumn<NDimSpatial, ImLayout, InDataType, OutDataType>();

        auto ref_invoker = ref_image_to_column.MakeInvoker();

        auto ref_argument = ref_image_to_column.MakeArgument(in,
                                                             out_host,
                                                             conv_params.filter_spatial_lengths_,
                                                             conv_params.conv_filter_strides_,
                                                             conv_params.conv_filter_dilations_,
                                                             conv_params.input_left_pads_,
                                                             conv_params.input_right_pads_);

        if(!ref_image_to_column.IsSupportedArgument(&ref_argument))
        {
            std::cerr << "wrong! ref_img2col with the specified compilation parameters does "
                         "not support this img2col problem"
                      << std::endl;
            return false;
        }

        ref_invoker.Run(ref_argument);

        out_device_buf.FromDevice(out_device.mData.data());

        return ck::utils::check_err(out_device.mData, out_host.mData);
    }

    return true;
}

int RunImageToColumnExample(int argc, char* argv[])
{
    ExecutionConfig config;
    ck::utils::conv::ConvParam conv_params = DefaultConvParams;

    if(!parse_cmd_args(argc, argv, config, conv_params))
    {
        return EXIT_FAILURE;
    }

    if(conv_params.num_dim_spatial_ != NDimSpatial)
    {
        std::cerr << "unsupported # of spatial dimensions" << std::endl;
        return EXIT_FAILURE;
    }

    return !RunImageToColumn(config, conv_params);
}

int main(int argc, char* argv[]) { return RunImageToColumnExample(argc, argv); }
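One note on the perf line above: num_btype counts one InDataType read plus one OutDataType write for each of the G * NDoHoWo * CZYX gathered elements, and because ave_time is reported in milliseconds, bytes / 1e6 / ms comes out directly in GB/s.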
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/60_gemm_multi_ABD/gemm_multi_ABD_xdl_bias_fastgelu_bf16_i8.cpp
ADDED
@@ -0,0 +1,273 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_abd_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

#include "ck/utility/blkgemmpipe_scheduler.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16  = ck::half_t;
using BF16 = ck::bhalf_t;
using I8   = int8_t;
using F32  = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using A0DataType       = BF16;
using AsDataType       = ck::Tuple<A0DataType>;
using B0DataType       = I8;
using B1DataType       = BF16;
using BsDataType       = ck::Tuple<B0DataType, B1DataType>;
using AccDataType      = F32;
using CShuffleDataType = BF16;
using D0DataType       = BF16;
using DsDataType       = ck::Tuple<D0DataType>;
using EDataType        = BF16;

using A0Layout = Row;
using AsLayout = ck::Tuple<A0Layout>;
using B0Layout = Row;
using B1Layout = B0Layout;
using BsLayout = ck::Tuple<B0Layout, B1Layout>;
using D0Layout = Row;
using DsLayout = ck::Tuple<D0Layout>;
using ELayout  = Row;

using Multiply    = ck::tensor_operation::element_wise::Multiply;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using AddFastGelu = ck::tensor_operation::element_wise::AddFastGelu;

using AElementOp   = PassThrough;
using BElementOp   = Multiply;
using CDEElementOp = AddFastGelu;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::Default;

using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleABD_Xdl_CShuffle
// clang-format off
///######| ALayout| BLayout| DsLayout| ELayout| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
///######| | | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
///######| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
///######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
        < AsLayout, BsLayout, DsLayout, ELayout, AsDataType, BsDataType, AccDataType, CShuffleDataType, DsDataType, EDataType, AElementOp, BElementOp, CDEElementOp, GemmSpec, 1, 256, 128, 128, 64, 8, 4, 32, 32, 2, 2, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 0, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 8, 4, 0, 1, 1, S<1, 32, 1, 8>, 8, ck::BlockGemmPipelineScheduler::Intrawave, ck::BlockGemmPipelineVersion::v4>;
// clang-format on

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // GEMM shape
    ck::index_t M = 4096;
    ck::index_t N = 768;
    ck::index_t K = 6144;

    ck::index_t StrideA = K;
    ck::index_t StrideB = N;
    ck::index_t StrideD = 0;
    ck::index_t StrideE = N;

    if(argc == 1)
    {
        // use default case
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 11)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideD = std::stoi(argv[9]);
        StrideE = std::stoi(argv[10]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        printf("arg4 to 10: M (256x), N(128x), K(32x), StrideA, StrideB, StrideD, StrideE\n");
        exit(0);
    }

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            using namespace ck::literals;

            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    Tensor<A0DataType> a0_m_k(f_host_tensor_descriptor(M, K, StrideA, A0Layout{}));
    Tensor<B0DataType> b0_k_n(f_host_tensor_descriptor(K, N, StrideB, B0Layout{}));
    Tensor<B1DataType> b1_k_n(f_host_tensor_descriptor(K, N, 0, B1Layout{}));
    Tensor<D0DataType> d_m_n(f_host_tensor_descriptor(M, N, StrideD, D0Layout{}));
    Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
    Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));

    std::cout << "a0_m_k: " << a0_m_k.mDesc << std::endl;
    std::cout << "b0_k_n: " << b0_k_n.mDesc << std::endl;
    std::cout << "b1_k_n: " << b1_k_n.mDesc << std::endl;
    std::cout << "d_m_n: " << d_m_n.mDesc << std::endl;
    std::cout << "e_m_n: " << e_m_n_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a0_m_k.GenerateTensorValue(GeneratorTensor_2<A0DataType>{-5, 5});
        b0_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-5, 5});
        b1_k_n.GenerateTensorValue(GeneratorTensor_2<B1DataType>{0, 5});
        d_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
        break;
    default:
        a0_m_k.GenerateTensorValue(GeneratorTensor_3<A0DataType>{0.0, 1.0});
        b0_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-5, 5});
        b1_k_n.GenerateTensorValue(GeneratorTensor_3<B1DataType>{0, 5});
        d_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{-0.5, 0.5});
    }

    DeviceMem a0_device_buf(sizeof(A0DataType) * a0_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b0_device_buf(sizeof(B0DataType) * b0_k_n.mDesc.GetElementSpaceSize());
    DeviceMem b1_device_buf(sizeof(B1DataType) * b1_k_n.mDesc.GetElementSpaceSize());
    DeviceMem d_device_buf(sizeof(D0DataType) * d_m_n.mDesc.GetElementSpaceSize());
    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());

    a0_device_buf.ToDevice(a0_m_k.mData.data());
    b0_device_buf.ToDevice(b0_k_n.mData.data());
    b1_device_buf.ToDevice(b1_k_n.mData.data());
    d_device_buf.ToDevice(d_m_n.mData.data());
    e_device_buf.ToDevice(e_m_n_device_result.mData.data());

    auto a_element_op   = AElementOp{};
    auto b_element_op   = BElementOp{};
    auto cde_element_op = CDEElementOp{};

    constexpr ck::index_t NumATensor = 1;
    constexpr ck::index_t NumBTensor = 2;
    constexpr ck::index_t NumDTensor = 1;

    // do GEMM
    auto device_op = DeviceOpInstance{};
    auto invoker   = device_op.MakeInvoker();
    auto argument  = device_op.MakeArgument(
        std::array<const void*, NumATensor>{a0_device_buf.GetDeviceBuffer()},
        std::array<const void*, NumBTensor>{b0_device_buf.GetDeviceBuffer(),
                                            b1_device_buf.GetDeviceBuffer()},
        std::array<const void*, NumDTensor>{d_device_buf.GetDeviceBuffer()},
        e_device_buf.GetDeviceBuffer(),
        M,
        N,
        K,
        std::array<ck::index_t, NumATensor>{StrideA},
        std::array<ck::index_t, NumBTensor>{StrideB, 0},
        std::array<ck::index_t, NumDTensor>{StrideD},
        StrideE,
        a_element_op,
        b_element_op,
        cde_element_op);

    if(!device_op.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_gemm with the specified compilation parameters does "
            "not support this GEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop = std::size_t(2) * M * N * K;
    std::size_t num_btype =
        sizeof(A0DataType) * M * K + sizeof(B0DataType) * K * N + sizeof(EDataType) * M * N;

    float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
              << std::endl;

    e_device_buf.FromDevice(e_m_n_device_result.mData.data());

    if(do_verification)
    {
        Tensor<CShuffleDataType> c_m_n({M, N});

        Tensor<A0DataType> a_m_k({M, K});

        Tensor<B1DataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, B0Layout{}));

        for(int n = 0; n < N; ++n)
        {
            for(int k = 0; k < K; ++k)
            {
                b_element_op(b_k_n(k, n), b0_k_n(k, n), b1_k_n(k, n));
            }
        }

        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<A0DataType,
                                                                                B1DataType,
                                                                                CShuffleDataType,
                                                                                AccDataType,
                                                                                PassThrough,
                                                                                PassThrough,
                                                                                PassThrough>;
        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument = ref_gemm.MakeArgument(
            a0_m_k, b_k_n, c_m_n, PassThrough{}, PassThrough{}, PassThrough{});

        ref_invoker.Run(ref_argument);

        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d_m_n(m, n));
            }
        }

        e_device_buf.FromDevice(e_m_n_device_result.mData.data());

        return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result) ? 0 : 1;
    }

    return 0;
}
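Two details of this example are easy to miss. First, b1_k_n is built with stride 0 along K, so the bf16 B1 tensor acts as a per-output-channel scale that dequantizes the int8 B0 weights inside the Multiply B-elementwise op. Second, AddFastGelu adds the bias D and then applies a fast GELU. A host-side sketch of both steps, using the common tanh approximation of GELU (the exact polynomial CK's FastGelu uses may differ):

#include <cmath>
#include <cstdint>

// Tanh approximation of GELU: 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))).
// Assumed to be close to what AddFastGelu applies after the bias add.
inline float fast_gelu(float x)
{
    return 0.5f * x * (1.0f + std::tanh(0.7978845608f * (x + 0.044715f * x * x * x)));
}

// The Multiply B-elementwise op: dequantize an int8 weight with its
// per-output-channel bf16 scale (stride 0 along K makes b1 a length-N vector).
inline float dequant_weight(std::int8_t b0_kn, float b1_n)
{
    return static_cast<float>(b0_kn) * b1_n;
}

// The AddFastGelu CDE op on one output element: bias add, then activation.
inline float add_fast_gelu(float c_mn, float d_mn) { return fast_gelu(c_mn + d_mn); }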
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/60_gemm_multi_ABD/gemm_multi_ABD_xdl_fastgelu_bf16_i8.cpp
ADDED
@@ -0,0 +1,273 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_abd_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

#include "ck/utility/blkgemmpipe_scheduler.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16  = ck::half_t;
using BF16 = ck::bhalf_t;
using I8   = int8_t;
using F32  = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using A0DataType       = BF16;
using AsDataType       = ck::Tuple<A0DataType>;
using B0DataType       = I8;
using B1DataType       = BF16;
using BsDataType       = ck::Tuple<B0DataType, B1DataType>;
using AccDataType      = F32;
using CShuffleDataType = F32;
using D0DataType       = BF16;
using DsDataType       = ck::Tuple<>;
using EDataType        = BF16;

using A0Layout = Row;
using AsLayout = ck::Tuple<A0Layout>;
using B0Layout = Row;
using B1Layout = B0Layout;
using BsLayout = ck::Tuple<B0Layout, B1Layout>;
using D0Layout = Row;
using DsLayout = ck::Tuple<>;
using ELayout  = Row;

using Multiply    = ck::tensor_operation::element_wise::Multiply;
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using FastGelu    = ck::tensor_operation::element_wise::FastGelu;

using AElementOp   = PassThrough;
using BElementOp   = Multiply;
using CDEElementOp = FastGelu;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::Default;

using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleABD_Xdl_CShuffle
// clang-format off
///######| ALayout| BLayout| DsLayout| ELayout| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
///######| | | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
///######| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
///######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
        < AsLayout, BsLayout, DsLayout, ELayout, AsDataType, BsDataType, AccDataType, CShuffleDataType, DsDataType, EDataType, AElementOp, BElementOp, CDEElementOp, GemmSpec, 1, 256, 128, 128, 64, 8, 4, 32, 32, 2, 2, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 0, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 8, 4, 0, 1, 1, S<1, 32, 1, 8>, 8, ck::BlockGemmPipelineScheduler::Intrawave, ck::BlockGemmPipelineVersion::v4>;
// clang-format on

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // GEMM shape
    ck::index_t M = 4096;
    ck::index_t N = 768;
    ck::index_t K = 6144;

    ck::index_t StrideA = K;
    ck::index_t StrideB = N;
    ck::index_t StrideD = 0;
    ck::index_t StrideE = N;

    if(argc == 1)
    {
        // use default case
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 11)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideD = std::stoi(argv[9]);
        StrideE = std::stoi(argv[10]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        printf("arg4 to 10: M (256x), N(128x), K(32x), StrideA, StrideB, StrideD, StrideE\n");
        exit(0);
    }

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            using namespace ck::literals;

            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    Tensor<A0DataType> a0_m_k(f_host_tensor_descriptor(M, K, StrideA, A0Layout{}));
    Tensor<B0DataType> b0_k_n(f_host_tensor_descriptor(K, N, StrideB, B0Layout{}));
    Tensor<B1DataType> b1_k_n(f_host_tensor_descriptor(K, N, 0, B1Layout{}));
    Tensor<D0DataType> d_m_n(f_host_tensor_descriptor(M, N, StrideD, D0Layout{}));
    Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
    Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));

    std::cout << "a0_m_k: " << a0_m_k.mDesc << std::endl;
    std::cout << "b0_k_n: " << b0_k_n.mDesc << std::endl;
    std::cout << "b1_k_n: " << b1_k_n.mDesc << std::endl;
    std::cout << "d_m_n: " << d_m_n.mDesc << std::endl;
    std::cout << "e_m_n: " << e_m_n_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a0_m_k.GenerateTensorValue(GeneratorTensor_2<A0DataType>{-5, 5});
        b0_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-5, 5});
        b1_k_n.GenerateTensorValue(GeneratorTensor_2<B1DataType>{0, 5});
        d_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
        break;
    default:
        a0_m_k.GenerateTensorValue(GeneratorTensor_3<A0DataType>{0.0, 1.0});
        b0_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-5, 5});
        b1_k_n.GenerateTensorValue(GeneratorTensor_3<B1DataType>{0, 5});
        d_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{-0.5, 0.5});
    }

    DeviceMem a0_device_buf(sizeof(A0DataType) * a0_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b0_device_buf(sizeof(B0DataType) * b0_k_n.mDesc.GetElementSpaceSize());
    DeviceMem b1_device_buf(sizeof(B1DataType) * b1_k_n.mDesc.GetElementSpaceSize());
    DeviceMem d_device_buf(sizeof(D0DataType) * d_m_n.mDesc.GetElementSpaceSize());
    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());

    a0_device_buf.ToDevice(a0_m_k.mData.data());
    b0_device_buf.ToDevice(b0_k_n.mData.data());
    b1_device_buf.ToDevice(b1_k_n.mData.data());
    d_device_buf.ToDevice(d_m_n.mData.data());
    e_device_buf.ToDevice(e_m_n_device_result.mData.data());

    auto a_element_op   = AElementOp{};
    auto b_element_op   = BElementOp{};
    auto cde_element_op = CDEElementOp{};

    constexpr ck::index_t NumATensor = 1;
    constexpr ck::index_t NumBTensor = 2;
    constexpr ck::index_t NumDTensor = 0;

    // do GEMM
    auto device_op = DeviceOpInstance{};
    auto invoker   = device_op.MakeInvoker();
    auto argument  = device_op.MakeArgument(
        std::array<const void*, NumATensor>{a0_device_buf.GetDeviceBuffer()},
        std::array<const void*, NumBTensor>{b0_device_buf.GetDeviceBuffer(),
                                            b1_device_buf.GetDeviceBuffer()},
        std::array<const void*, NumDTensor>{},
        e_device_buf.GetDeviceBuffer(),
        M,
        N,
        K,
        std::array<ck::index_t, NumATensor>{StrideA},
        std::array<ck::index_t, NumBTensor>{StrideB, 0},
        std::array<ck::index_t, NumDTensor>{},
        StrideE,
        a_element_op,
        b_element_op,
        cde_element_op);

    if(!device_op.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_gemm with the specified compilation parameters does "
            "not support this GEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop = std::size_t(2) * M * N * K;
    std::size_t num_btype =
        sizeof(A0DataType) * M * K + sizeof(B0DataType) * K * N + sizeof(EDataType) * M * N;

    float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
              << std::endl;

    e_device_buf.FromDevice(e_m_n_device_result.mData.data());

    if(do_verification)
    {
        Tensor<CShuffleDataType> c_m_n({M, N});

        Tensor<A0DataType> a_m_k({M, K});

        Tensor<B1DataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, B0Layout{}));

        for(int n = 0; n < N; ++n)
        {
            for(int k = 0; k < K; ++k)
            {
                b_element_op(b_k_n(k, n), b0_k_n(k, n), b1_k_n(k, n));
            }
        }

        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<A0DataType,
                                                                                B1DataType,
                                                                                CShuffleDataType,
                                                                                AccDataType,
                                                                                PassThrough,
                                                                                PassThrough,
                                                                                PassThrough>;
        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument = ref_gemm.MakeArgument(
            a0_m_k, b_k_n, c_m_n, PassThrough{}, PassThrough{}, PassThrough{});

        ref_invoker.Run(ref_argument);

        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n));
            }
        }

        e_device_buf.FromDevice(e_m_n_device_result.mData.data());

        return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result) ? 0 : 1;
    }

    return 0;
}
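This variant differs from the bias version above only in the epilogue: DsDataType is the empty tuple and NumDTensor is 0, so FastGelu is applied directly to the accumulator with no bias add (the d_m_n tensor is still allocated and copied, but the kernel never reads it).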
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/60_gemm_multi_ABD/gemm_multi_ABD_xdl_fp16.cpp
ADDED
@@ -0,0 +1,363 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_abd_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16 = ck::half_t;
using F32 = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;

using ADataType        = F16;
using BDataType        = F16;
using AccDataType      = F32;
using CShuffleDataType = F32;
using DDataType        = F16;
using EDataType        = F16;

using ALayout = Row;
using BLayout = Row;
using DLayout = Row;
using ELayout = Row;

struct AddScale
{
    static constexpr auto I0 = ck::Number<0>{};
    static constexpr auto I1 = ck::Number<1>{};
    static constexpr auto I2 = ck::Number<2>{};
    static constexpr auto I3 = ck::Number<3>{};

    __host__ __device__ constexpr void
    operator()(ck::half4_t& a, const ck::half4_t& a0, const ck::half4_t& a1) const
    {
        const auto a0_v_t = ck::vector_type<ck::half_t, 4>{a0};
        const auto a1_v_t = ck::vector_type<ck::half_t, 4>{a1};

        auto r_v_t = ck::vector_type<ck::half_t, 4>{};

        r_v_t.AsType<ck::half_t>()(I0) =
            scale * (a0_v_t.AsType<ck::half_t>()[I0] + a1_v_t.AsType<ck::half_t>()[I0]);
        r_v_t.AsType<ck::half_t>()(I1) =
            scale * (a0_v_t.AsType<ck::half_t>()[I1] + a1_v_t.AsType<ck::half_t>()[I1]);
        r_v_t.AsType<ck::half_t>()(I2) =
            scale * (a0_v_t.AsType<ck::half_t>()[I2] + a1_v_t.AsType<ck::half_t>()[I2]);
        r_v_t.AsType<ck::half_t>()(I3) =
            scale * (a0_v_t.AsType<ck::half_t>()[I3] + a1_v_t.AsType<ck::half_t>()[I3]);

        a = r_v_t.AsType<ck::half4_t>()[I0];
    }

    __host__ __device__ constexpr void
    operator()(ck::half_t& a, const ck::half_t& a0, const ck::half_t& a1) const
    {
        a = scale * (a0 + a1);
    }

    // this attribute controls the copy_function applying element_wise_op with
    // pack4_data
    constexpr const static bool is_pack4_invocable = true;

    float scale = 1.0;
};

struct AlphaBetaAdd
{
    AlphaBetaAdd(float alpha, float beta) : alpha_(alpha), beta_(beta){};

    template <typename E, typename C, typename D>
    __host__ __device__ constexpr void operator()(E& e, const C& c, const D& d) const;

    template <>
    __host__ __device__ constexpr void operator()<ck::half_t, float, ck::half_t>(
        ck::half_t& e, const float& c, const ck::half_t& d) const
    {
        e = ck::type_convert<ck::half_t>(alpha_ * c + beta_ * ck::type_convert<float>(d));
    };

    float alpha_;
    float beta_;
};

using AElementOp   = AddScale;
using BElementOp   = PassThrough;
using CDEElementOp = AlphaBetaAdd;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleABD_Xdl_CShuffle<
    ck::Tuple<ALayout, ALayout>,
    ck::Tuple<BLayout>,
    ck::Tuple<DLayout>,
    ELayout,
    ck::Tuple<ADataType, ADataType>,
    ck::Tuple<BDataType>,
    AccDataType,
    CShuffleDataType,
    ck::Tuple<DDataType>,
    EDataType,
    AElementOp,
    BElementOp,
    CDEElementOp,
    GemmSpec,
    1,
    256,
    256,
    128,
    32,
    8,
    8,
    32,
    32,
    4,
    2,
    S<4, 64, 1>,
    S<1, 0, 2>,
    S<1, 0, 2>,
    2,
    8,
    8,
    1,
    S<4, 64, 1>,
    S<1, 0, 2>,
    S<1, 0, 2>,
    1,
    2,
    8,
    1,
    1,
    1,
    S<1, 32, 1, 8>,
    8>;

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // GEMM shape
    ck::index_t M = 3840;
    ck::index_t N = 4096;
    ck::index_t K = 4096;

    ck::index_t StrideA = K;
    ck::index_t StrideB = N;
    ck::index_t StrideD = N;
    ck::index_t StrideE = N;

    float alpha = 1.0f;
    float beta  = 1.0f;

    if(argc == 1)
    {
        // use default case
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 6)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        alpha = std::stof(argv[4]);
        beta  = std::stof(argv[5]);
    }
    else if(argc == 13)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideD = std::stoi(argv[9]);
        StrideE = std::stoi(argv[10]);

        alpha = std::stof(argv[11]);
        beta  = std::stof(argv[12]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        printf("arg4 to 12: M (256x), N(128x), K(32x), StrideA, StrideB, StrideD, StrideE, alpha, "
               "beta\n");
        exit(0);
    }

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            using namespace ck::literals;

            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    Tensor<ADataType> a0_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<ADataType> a1_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
    Tensor<DDataType> d_m_n(f_host_tensor_descriptor(M, N, StrideD, DLayout{}));
    Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
    Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));

    std::cout << "a0_m_k: " << a0_m_k.mDesc << std::endl;
    std::cout << "a1_m_k: " << a1_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "d_m_n: " << d_m_n.mDesc << std::endl;
    std::cout << "e_m_n: " << e_m_n_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a0_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        a1_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        d_m_n.GenerateTensorValue(GeneratorTensor_2<DDataType>{-5, 5});
        break;
    default:
        a0_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        a1_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
        d_m_n.GenerateTensorValue(GeneratorTensor_3<DDataType>{-0.5, 0.5});
    }

    DeviceMem a0_device_buf(sizeof(ADataType) * a0_m_k.mDesc.GetElementSpaceSize());
    DeviceMem a1_device_buf(sizeof(ADataType) * a1_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
    DeviceMem d_device_buf(sizeof(DDataType) * d_m_n.mDesc.GetElementSpaceSize());
    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());

    a0_device_buf.ToDevice(a0_m_k.mData.data());
    a1_device_buf.ToDevice(a1_m_k.mData.data());
    b_device_buf.ToDevice(b_k_n.mData.data());
    d_device_buf.ToDevice(d_m_n.mData.data());
    e_device_buf.ToDevice(e_m_n_device_result.mData.data());

    auto a_element_op   = AElementOp{0.2};
    auto b_element_op   = BElementOp{};
    auto cde_element_op = CDEElementOp{alpha, beta};

    // do GEMM
    auto device_op = DeviceOpInstance{};
    auto invoker   = device_op.MakeInvoker();
    auto argument  = device_op.MakeArgument(
        std::array<const void*, 2>{a0_device_buf.GetDeviceBuffer(),
                                   a1_device_buf.GetDeviceBuffer()},
        std::array<const void*, 1>{b_device_buf.GetDeviceBuffer()},
        std::array<const void*, 1>{d_device_buf.GetDeviceBuffer()},
        e_device_buf.GetDeviceBuffer(),
        M,
        N,
        K,
        std::array<ck::index_t, 2>{StrideA, StrideA},
        std::array<ck::index_t, 1>{StrideB},
        std::array<ck::index_t, 1>{StrideD},
        StrideE,
        a_element_op,
        b_element_op,
        cde_element_op);

    if(!device_op.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_gemm with the specified compilation parameters does "
            "not support this GEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop = std::size_t(2) * M * N * K;
    std::size_t num_btype =
        sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N;

    float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
              << std::endl;

    e_device_buf.FromDevice(e_m_n_device_result.mData.data());

    if(do_verification)
    {
        Tensor<CShuffleDataType> c_m_n({M, N});

        Tensor<ADataType> a_m_k({M, K});

        for(int m = 0; m < M; ++m)
        {
            for(int k = 0; k < K; ++k)
            {
                a_element_op(a_m_k(m, k), a0_m_k(m, k), a1_m_k(m, k));
            }
        }

        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
                                                                                BDataType,
                                                                                CShuffleDataType,
                                                                                AccDataType,
                                                                                PassThrough,
                                                                                BElementOp,
                                                                                PassThrough>;
        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument =
            ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, PassThrough{}, b_element_op, PassThrough{});

        ref_invoker.Run(ref_argument);

        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d_m_n(m, n));
            }
        }

        e_device_buf.FromDevice(e_m_n_device_result.mData.data());

        return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result) ? 0 : 1;
    }

    return 0;
}
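The is_pack4_invocable flag in AddScale tells the copy routine it may feed the functor packed half4_t data instead of calling the scalar overload four times; the two overloads must therefore agree element-for-element. A self-contained analogue of that contract, with plain float standing in for ck::half_t:

#include <array>
#include <cassert>

// Stand-in for AddScale: a scalar overload plus a pack-of-4 overload that
// must agree element-for-element with the scalar one.
struct AddScaleLike
{
    float scale = 1.0f;

    void operator()(float& r, float a0, float a1) const { r = scale * (a0 + a1); }

    void operator()(std::array<float, 4>& r,
                    const std::array<float, 4>& a0,
                    const std::array<float, 4>& a1) const
    {
        for(int i = 0; i < 4; ++i)
            r[i] = scale * (a0[i] + a1[i]);
    }
};

int main()
{
    AddScaleLike op{0.2f};
    std::array<float, 4> a0{1, 2, 3, 4}, a1{4, 3, 2, 1}, packed{};
    op(packed, a0, a1);
    for(int i = 0; i < 4; ++i)
    {
        float scalar = 0.0f;
        op(scalar, a0[i], a1[i]);
        assert(scalar == packed[i]); // vector path must match scalar path
    }
    return 0;
}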
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/example/60_gemm_multi_ABD/gemm_multi_ABD_xdl_multiply_bias_fastgelu_bf16_i8.cpp
ADDED
@@ -0,0 +1,274 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_abd_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
#include "ck/library/utility/check_err.hpp"

#include "ck/utility/blkgemmpipe_scheduler.hpp"

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using F16  = ck::half_t;
using BF16 = ck::bhalf_t;
using I8   = int8_t;
using F32  = float;

using Row = ck::tensor_layout::gemm::RowMajor;
using Col = ck::tensor_layout::gemm::ColumnMajor;

using A0DataType       = BF16;
using AsDataType       = ck::Tuple<A0DataType>;
using B0DataType       = I8;
using B1DataType       = BF16;
using BsDataType       = ck::Tuple<B0DataType>;
using AccDataType      = F32;
using CShuffleDataType = F32;
using D0DataType       = BF16;
using DsDataType       = ck::Tuple<B1DataType, D0DataType>;
using EDataType        = BF16;

using A0Layout = Row;
using AsLayout = ck::Tuple<A0Layout>;
using B0Layout = Row;
using B1Layout = B0Layout;
using BsLayout = ck::Tuple<B0Layout>;
using D0Layout = Row;
using DsLayout = ck::Tuple<B1Layout, D0Layout>;
using ELayout  = Row;

using PassThrough         = ck::tensor_operation::element_wise::PassThrough;
using MultiplyAddFastGelu = ck::tensor_operation::element_wise::MultiplyAddFastGelu;

using AElementOp   = PassThrough;
using BElementOp   = PassThrough;
using CDEElementOp = MultiplyAddFastGelu;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::Default;

using DeviceOpInstance = ck::tensor_operation::device::DeviceGemmMultipleABD_Xdl_CShuffle
// clang-format off
///######| ALayout| BLayout| DsLayout| ELayout| AData| BData| AccData| CShuffle| DsData| EData| A| B| CDE| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
///######| | | | | Type| Type| Type| DataType| Type| Type| Elementwise| Elementwise| Elementwise| Spacialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
///######| | | | | | | | | | | Operation| Operation| Operation| | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
///######| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
< AsLayout, BsLayout, DsLayout, ELayout, AsDataType, BsDataType, AccDataType, CShuffleDataType, DsDataType, EDataType, AElementOp, BElementOp, CDEElementOp, GemmSpec, 1, 256, 128, 128, 64, 8, 4, 32, 32, 2, 2, S<8, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 8, 8, 0, S<16, 16, 1>, S<0, 2, 1>, S<0, 2, 1>, 1, 8, 4, 0, 1, 1, S<1, 32, 1, 8>, 8, ck::BlockGemmPipelineScheduler::Intrawave, ck::BlockGemmPipelineVersion::v4>;
// clang-format on

int main(int argc, char* argv[])
{
    bool do_verification = true;
    int init_method      = 1;
    bool time_kernel     = false;

    // GEMM shape
    ck::index_t M = 4096;
    ck::index_t N = 768;
    ck::index_t K = 6144;

    ck::index_t StrideA = K;
    ck::index_t StrideB = N;
    ck::index_t StrideD = 0;
    ck::index_t StrideE = N;

    if(argc == 1)
    {
        // use default case
    }
    else if(argc == 4)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);
    }
    else if(argc == 11)
    {
        do_verification = std::stoi(argv[1]);
        init_method     = std::stoi(argv[2]);
        time_kernel     = std::stoi(argv[3]);

        M = std::stoi(argv[4]);
        N = std::stoi(argv[5]);
        K = std::stoi(argv[6]);

        StrideA = std::stoi(argv[7]);
        StrideB = std::stoi(argv[8]);
        StrideD = std::stoi(argv[9]);
        StrideE = std::stoi(argv[10]);
    }
    else
    {
        printf("arg1: verification (0=no, 1=yes)\n");
        printf("arg2: initialization (0=no init, 1=integer value, 2=decimal value)\n");
        printf("arg3: time kernel (0=no, 1=yes)\n");
        printf("arg4 to 10: M (256x), N(128x), K(32x), StrideA, StrideB, StrideD, StrideE\n");
        exit(0);
    }

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            using namespace ck::literals;

            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    Tensor<A0DataType> a0_m_k(f_host_tensor_descriptor(M, K, StrideA, A0Layout{}));
    Tensor<B0DataType> b0_k_n(f_host_tensor_descriptor(K, N, StrideB, B0Layout{}));
    Tensor<B1DataType> b1_k_n(f_host_tensor_descriptor(K, N, 0, B1Layout{}));
    Tensor<D0DataType> d_m_n(f_host_tensor_descriptor(M, N, StrideD, D0Layout{}));
    Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
    Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));

    std::cout << "a0_m_k: " << a0_m_k.mDesc << std::endl;
    std::cout << "b0_k_n: " << b0_k_n.mDesc << std::endl;
    std::cout << "b1_k_n: " << b1_k_n.mDesc << std::endl;
    std::cout << "d_m_n: " << d_m_n.mDesc << std::endl;
    std::cout << "e_m_n: " << e_m_n_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a0_m_k.GenerateTensorValue(GeneratorTensor_2<A0DataType>{-5, 5});
        b0_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-5, 5});
        b1_k_n.GenerateTensorValue(GeneratorTensor_2<B1DataType>{0, 5});
        d_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
        break;
    default:
        a0_m_k.GenerateTensorValue(GeneratorTensor_3<A0DataType>{0.0, 1.0});
        b0_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-5, 5});
        b1_k_n.GenerateTensorValue(GeneratorTensor_3<B1DataType>{0, 5});
        d_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{-0.5, 0.5});
    }

    DeviceMem a0_device_buf(sizeof(A0DataType) * a0_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b0_device_buf(sizeof(B0DataType) * b0_k_n.mDesc.GetElementSpaceSize());
    DeviceMem b1_device_buf(sizeof(B1DataType) * b1_k_n.mDesc.GetElementSpaceSize());
    DeviceMem d_device_buf(sizeof(D0DataType) * d_m_n.mDesc.GetElementSpaceSize());
    DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());

    a0_device_buf.ToDevice(a0_m_k.mData.data());
    b0_device_buf.ToDevice(b0_k_n.mData.data());
    b1_device_buf.ToDevice(b1_k_n.mData.data());
    d_device_buf.ToDevice(d_m_n.mData.data());
    e_device_buf.ToDevice(e_m_n_device_result.mData.data());

    auto a_element_op   = AElementOp{};
    auto b_element_op   = BElementOp{};
    auto cde_element_op = CDEElementOp{};

    constexpr ck::index_t NumATensor = 1;
    constexpr ck::index_t NumBTensor = 1;
    constexpr ck::index_t NumDTensor = 2;

    // do GEMM
    auto device_op = DeviceOpInstance{};
    auto invoker   = device_op.MakeInvoker();
    auto argument =
        device_op.MakeArgument(std::array<const void*, NumATensor>{a0_device_buf.GetDeviceBuffer()},
                               std::array<const void*, NumBTensor>{b0_device_buf.GetDeviceBuffer()},
                               std::array<const void*, NumDTensor>{b1_device_buf.GetDeviceBuffer(),
                                                                   d_device_buf.GetDeviceBuffer()},
                               e_device_buf.GetDeviceBuffer(),
                               M,
                               N,
                               K,
                               std::array<ck::index_t, NumATensor>{StrideA},
                               std::array<ck::index_t, NumBTensor>{StrideB},
                               std::array<ck::index_t, NumDTensor>{0, StrideD},
                               StrideE,
                               a_element_op,
                               b_element_op,
                               cde_element_op);

    if(!device_op.IsSupportedArgument(argument))
    {
        throw std::runtime_error(
            "wrong! device_gemm with the specified compilation parameters does "
            "not support this GEMM problem");
    }

    float ave_time = invoker.Run(argument, StreamConfig{nullptr, time_kernel});

    std::size_t flop = std::size_t(2) * M * N * K;
    std::size_t num_btype =
        sizeof(A0DataType) * M * K + sizeof(B0DataType) * K * N + sizeof(EDataType) * M * N;

    float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

    float gb_per_sec = num_btype / 1.E6 / ave_time;

    std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec << " GB/s"
              << std::endl;

    e_device_buf.FromDevice(e_m_n_device_result.mData.data());

    if(do_verification)
    {
        Tensor<CShuffleDataType> c_m_n({M, N});

        Tensor<A0DataType> a_m_k({M, K});

        Tensor<B1DataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, B0Layout{}));

#if 0
        for(int n = 0; n < N; ++n)
        {
            for(int k = 0; k < K; ++k)
            {
                b_element_op(b_k_n(k, n), b0_k_n(k, n), b1_k_n(k, n));
            }
        }
#endif

        using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<A0DataType,
                                                                                B0DataType,
                                                                                CShuffleDataType,
                                                                                AccDataType,
                                                                                PassThrough,
                                                                                PassThrough,
                                                                                PassThrough>;
        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument = ref_gemm.MakeArgument(
            a0_m_k, b0_k_n, c_m_n, PassThrough{}, PassThrough{}, PassThrough{});

        ref_invoker.Run(ref_argument);

        for(int m = 0; m < M; ++m)
        {
            for(int n = 0; n < N; ++n)
            {
                cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), b1_k_n(0, n), d_m_n(m, n));
            }
        }

        e_device_buf.FromDevice(e_m_n_device_result.mData.data());

        return ck::utils::check_err(e_m_n_device_result, e_m_n_host_result) ? 0 : 1;
    }

    return 0;
}
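The CDE op in this example, MultiplyAddFastGelu, fuses a scale, a bias add, and a FastGelu activation into the GEMM epilogue: with the operand order cde_element_op(e, c, b1, d) above, e = FastGelu(c * b1 + d). A self-contained sketch using the common tanh-based GELU approximation (an assumption for illustration; CK's FastGelu may use a different polynomial internally):

#include <cmath>
#include <cstdio>

// Common tanh-based FastGelu approximation:
// 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
float fast_gelu(float x)
{
    const float k = 0.7978845608f; // sqrt(2/pi)
    return 0.5f * x * (1.f + std::tanh(k * (x + 0.044715f * x * x * x)));
}

// e = FastGelu(c * multiplier + bias), mirroring the operand order in the
// verification loop above (c from GEMM, b1 as multiplier, d as bias).
float multiply_add_fast_gelu(float c, float multiplier, float bias)
{
    return fast_gelu(c * multiplier + bias);
}

int main()
{
    std::printf("%f\n", multiply_add_fast_gelu(2.0f, 0.5f, 1.0f)); // FastGelu(2.0)
    return 0;
}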
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/data_type_enum.hpp
ADDED
@@ -0,0 +1,20 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

namespace ck {

enum struct DataTypeEnum
{
    Half     = 0,
    Float    = 1,
    Int32    = 2,
    Int8     = 3,
    Int8x4   = 4,
    BFloat16 = 5,
    Double   = 6,
    Unknown  = 100,
};

} // namespace ck
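A small usage sketch for this enum (a hypothetical helper, not part of the profiler; the enum is re-declared locally so the snippet compiles on its own) mapping each tag to its element size in bytes, with Int8x4 treated as a packed group of four int8 values:

#include <cstddef>

namespace ck_sketch {

enum struct DataTypeEnum { Half = 0, Float = 1, Int32 = 2, Int8 = 3, Int8x4 = 4, BFloat16 = 5, Double = 6, Unknown = 100 };

// Bytes per element for each data-type tag; 0 signals an unknown type.
std::size_t element_size_bytes(DataTypeEnum t)
{
    switch(t)
    {
    case DataTypeEnum::Half: return 2;
    case DataTypeEnum::BFloat16: return 2;
    case DataTypeEnum::Float: return 4;
    case DataTypeEnum::Int32: return 4;
    case DataTypeEnum::Int8: return 1;
    case DataTypeEnum::Int8x4: return 4; // four packed int8 lanes
    case DataTypeEnum::Double: return 8;
    default: return 0;
    }
}

} // namespace ck_sketch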
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/profile_batched_gemm_add_relu_gemm_add_impl.hpp
ADDED
@@ -0,0 +1,360 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <memory>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/tensor_operation_instance/gpu/batched_gemm_add_relu_gemm_add.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

namespace ck {
namespace profiler {

template <typename A0Layout,
          typename B0Layout,
          typename D0sLayout,
          typename B1Layout,
          typename D1sLayout,
          typename E1Layout,
          typename A0DataType,
          typename B0DataType,
          typename D0sDataType,
          typename B1DataType,
          typename D1sDataType,
          typename E1DataType>
bool profile_batched_gemm_add_relu_gemm_add_impl(bool do_verification,
                                                 int init_method,
                                                 bool do_log,
                                                 bool time_kernel,
                                                 int M,
                                                 int N,
                                                 int K,
                                                 int O,
                                                 int BatchCount    = 1,
                                                 int StrideA0      = -1,
                                                 int StrideB0      = -1,
                                                 int StrideD0      = -1,
                                                 int StrideB1      = -1,
                                                 int StrideD1      = -1,
                                                 int StrideE1      = -1,
                                                 int BatchStrideA0 = -1,
                                                 int BatchStrideB0 = -1,
                                                 int BatchStrideD0 = -1,
                                                 int BatchStrideB1 = -1,
                                                 int BatchStrideD1 = -1,
                                                 int BatchStrideE1 = -1)

{
    using Row = tensor_layout::gemm::RowMajor;
    using Col = tensor_layout::gemm::ColumnMajor;

    using PassThrough = tensor_operation::element_wise::PassThrough;

    using A0ElementOp   = PassThrough;
    using B0ElementOp   = PassThrough;
    using CDE0ElementOp = ck::tensor_operation::element_wise::AddRelu;
    using B1ElementOp   = PassThrough;
    using CDE1ElementOp = ck::tensor_operation::element_wise::Add;

    using D0DataType = remove_cvref_t<tuple_element_t<0, D0sDataType>>;

    using D0Layout   = remove_cvref_t<tuple_element_t<0, D0sLayout>>;
    using D1DataType = remove_cvref_t<tuple_element_t<0, D1sDataType>>;
    using D1Layout   = remove_cvref_t<tuple_element_t<0, D1sLayout>>;

    // for reference
    using RefAcc0DataType = float;
    using RefAcc1DataType = float;

    bool pass = true;

    const int DefaultStrideA0 = ck::is_same_v<A0Layout, Row> ? K : M;
    const int DefaultStrideB0 = ck::is_same_v<B0Layout, Row> ? N : K;
    const int DefaultStrideD0 = ck::is_same_v<D0Layout, Row> ? N : M;
    const int DefaultStrideB1 = ck::is_same_v<B1Layout, Row> ? O : N;
    const int DefaultStrideD1 = ck::is_same_v<D1Layout, Row> ? O : M;
    const int DefaultStrideE1 = ck::is_same_v<E1Layout, Row> ? O : M;

    StrideA0 = (StrideA0 < 0) ? DefaultStrideA0 : StrideA0;
    StrideB0 = (StrideB0 < 0) ? DefaultStrideB0 : StrideB0;
    StrideD0 = (StrideD0 < 0) ? DefaultStrideD0 : StrideD0;
    StrideB1 = (StrideB1 < 0) ? DefaultStrideB1 : StrideB1;
    StrideD1 = (StrideD1 < 0) ? DefaultStrideD1 : StrideD1;
    StrideE1 = (StrideE1 < 0) ? DefaultStrideE1 : StrideE1;

    const int DefaultBatchStrideA0 = (ck::is_same_v<A0Layout, Col> ? K : M) * StrideA0;
    const int DefaultBatchStrideB0 = (ck::is_same_v<B0Layout, Col> ? N : K) * StrideB0;
    const int DefaultBatchStrideD0 = (ck::is_same_v<D0Layout, Col> ? N : M) * StrideD0;
    const int DefaultBatchStrideB1 = (ck::is_same_v<B1Layout, Col> ? O : N) * StrideB1;
    const int DefaultBatchStrideD1 = (ck::is_same_v<D1Layout, Col> ? O : M) * StrideD1;
    const int DefaultBatchStrideE1 = (ck::is_same_v<E1Layout, Col> ? O : M) * StrideE1;

    BatchStrideA0 = BatchStrideA0 < 0 ? DefaultBatchStrideA0 : BatchStrideA0;
    BatchStrideB0 = BatchStrideB0 < 0 ? DefaultBatchStrideB0 : BatchStrideB0;
    BatchStrideD0 = BatchStrideD0 < 0 ? DefaultBatchStrideD0 : BatchStrideD0;
    BatchStrideB1 = BatchStrideB1 < 0 ? DefaultBatchStrideB1 : BatchStrideB1;
    BatchStrideD1 = BatchStrideD1 < 0 ? DefaultBatchStrideD1 : BatchStrideD1;
    BatchStrideE1 = BatchStrideE1 < 0 ? DefaultBatchStrideE1 : BatchStrideE1;

    auto f_host_tensor_descriptor = [](std::size_t batch_count,
                                       std::size_t row,
                                       std::size_t col,
                                       std::size_t stride,
                                       std::size_t batch_stride,
                                       auto layout) {
        using namespace ck::literals;

        if(std::is_same<decltype(layout), Row>::value)
        {
            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
        }
        else
        {
            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
        }
    };

    // E_m_o = A_m_k * B0_k_n * B1_n_o
    Tensor<A0DataType> a0_g_m_k(
        f_host_tensor_descriptor(BatchCount, M, K, StrideA0, BatchStrideA0, A0Layout{}));
    Tensor<B0DataType> b0_g_k_n(
        f_host_tensor_descriptor(BatchCount, K, N, StrideB0, BatchStrideB0, B0Layout{}));
    Tensor<D0DataType> d0_g_m_n(
        f_host_tensor_descriptor(BatchCount, M, N, StrideD0, BatchStrideD0, D0Layout{}));
    Tensor<B1DataType> b1_g_n_o(
        f_host_tensor_descriptor(BatchCount, N, O, StrideB1, BatchStrideB1, B1Layout{}));
    Tensor<D1DataType> d1_g_m_o(
        f_host_tensor_descriptor(BatchCount, M, O, StrideD1, BatchStrideD1, D1Layout{}));
    Tensor<E1DataType> e1_g_m_o_host_result(
        f_host_tensor_descriptor(BatchCount, M, O, StrideE1, BatchStrideE1, E1Layout{}));
    Tensor<E1DataType> e1_g_m_o_device_result(
        f_host_tensor_descriptor(BatchCount, M, O, StrideE1, BatchStrideE1, E1Layout{}));

    // Host verification: Output of Gemm0 is input A of Gemm1
    Tensor<RefAcc0DataType> c0_g_m_n(f_host_tensor_descriptor(BatchCount, M, N, N, M * N, Row{}));
    Tensor<RefAcc0DataType> e0_g_m_n(f_host_tensor_descriptor(BatchCount, M, N, N, M * N, Row{}));
    Tensor<RefAcc1DataType> c1_g_m_o(f_host_tensor_descriptor(BatchCount, M, O, O, M * O, Row{}));

    std::cout << "a0_g_m_k: " << a0_g_m_k.mDesc << std::endl;
    std::cout << "b0_g_k_n: " << b0_g_k_n.mDesc << std::endl;
    std::cout << "d0_g_m_n: " << d0_g_m_n.mDesc << std::endl;
    std::cout << "b1_g_n_o: " << b1_g_n_o.mDesc << std::endl;
    std::cout << "d1_g_m_o: " << d1_g_m_o.mDesc << std::endl;
    std::cout << "e1_g_m_o: " << e1_g_m_o_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a0_g_m_k.GenerateTensorValue(GeneratorTensor_2<A0DataType>{-2, 3});
        b0_g_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-2, 3});
        d0_g_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-2, 3});
        b1_g_n_o.GenerateTensorValue(GeneratorTensor_2<B1DataType>{-2, 3});
        d1_g_m_o.GenerateTensorValue(GeneratorTensor_2<D1DataType>{-2, 3});
        break;
    default:
        a0_g_m_k.GenerateTensorValue(GeneratorTensor_3<A0DataType>{0.0, 1.0});
        b0_g_k_n.GenerateTensorValue(GeneratorTensor_3<B0DataType>{-0.5, 0.5});
        d0_g_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{0.0, 1.0});
        b1_g_n_o.GenerateTensorValue(GeneratorTensor_3<B1DataType>{-0.5, 0.5});
        d1_g_m_o.GenerateTensorValue(GeneratorTensor_3<D1DataType>{0.0, 1.0});
    }

    DeviceMem a0_g_m_k_device_buf(sizeof(A0DataType) * a0_g_m_k.mDesc.GetElementSize());
    DeviceMem b0_g_k_n_device_buf(sizeof(B0DataType) * b0_g_k_n.mDesc.GetElementSize());
    DeviceMem d0_g_m_n_device_buf(sizeof(D0DataType) * d0_g_m_n.mDesc.GetElementSpaceSize());
    DeviceMem b1_g_n_o_device_buf(sizeof(B1DataType) * b1_g_n_o.mDesc.GetElementSize());
    DeviceMem d1_g_m_o_device_buf(sizeof(D1DataType) * d1_g_m_o.mDesc.GetElementSpaceSize());
    DeviceMem e1_g_m_o_device_buf(sizeof(E1DataType) *
                                  e1_g_m_o_device_result.mDesc.GetElementSize());

    a0_g_m_k_device_buf.ToDevice(a0_g_m_k.mData.data());
    b0_g_k_n_device_buf.ToDevice(b0_g_k_n.mData.data());
    d0_g_m_n_device_buf.ToDevice(d0_g_m_n.mData.data());
    b1_g_n_o_device_buf.ToDevice(b1_g_n_o.mData.data());
    d1_g_m_o_device_buf.ToDevice(d1_g_m_o.mData.data());

    auto a0_element_op   = A0ElementOp{};
    auto b0_element_op   = B0ElementOp{};
    auto cde0_element_op = CDE0ElementOp{};
    auto b1_element_op   = B1ElementOp{};
    auto cde1_element_op = CDE1ElementOp{};

    using DeviceOp =
        tensor_operation::device::DeviceBatchedGemmMultipleDGemmMultipleD<A0Layout,
                                                                          B0Layout,
                                                                          D0sLayout,
                                                                          B1Layout,
                                                                          D1sLayout,
                                                                          E1Layout,
                                                                          A0DataType,
                                                                          B0DataType,
                                                                          D0sDataType,
                                                                          B1DataType,
                                                                          D1sDataType,
                                                                          E1DataType,
                                                                          A0ElementOp,
                                                                          B0ElementOp,
                                                                          CDE0ElementOp,
                                                                          B1ElementOp,
                                                                          CDE1ElementOp>;

    // get device op instances
    const auto op_ptrs = tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    if(do_verification)
    {
        // Ref Gemm0
        using ReferenceGemm0Instance = tensor_operation::host::ReferenceBatchedGemm<A0DataType,
                                                                                    B0DataType,
                                                                                    RefAcc0DataType,
                                                                                    RefAcc0DataType,
                                                                                    A0ElementOp,
                                                                                    B0ElementOp,
                                                                                    PassThrough>;

        // Ref Gemm1
        using ReferenceGemm1Instance = tensor_operation::host::ReferenceBatchedGemm<RefAcc0DataType,
                                                                                    B1DataType,
                                                                                    RefAcc1DataType,
                                                                                    RefAcc1DataType,
                                                                                    PassThrough,
                                                                                    B1ElementOp,
                                                                                    PassThrough>;

        auto ref_gemm0          = ReferenceGemm0Instance{};
        auto ref_gemm0_invoker  = ref_gemm0.MakeInvoker();
        auto ref_gemm0_argument = ref_gemm0.MakeArgument(
            a0_g_m_k, b0_g_k_n, c0_g_m_n, a0_element_op, b0_element_op, PassThrough{});

        ref_gemm0_invoker.Run(ref_gemm0_argument);

        // cde0_elementwise
        e0_g_m_n.ForEach(
            [&](auto&, auto idx) { cde0_element_op(e0_g_m_n(idx), c0_g_m_n(idx), d0_g_m_n(idx)); });

        auto ref_gemm1          = ReferenceGemm1Instance{};
        auto ref_gemm1_invoker  = ref_gemm1.MakeInvoker();
        auto ref_gemm1_argument = ref_gemm1.MakeArgument(
            e0_g_m_n, b1_g_n_o, c1_g_m_o, PassThrough{}, b1_element_op, PassThrough{});

        ref_gemm1_invoker.Run(ref_gemm1_argument);

        // cde1_elementwise
        e1_g_m_o_host_result.ForEach([&](auto&, auto idx) {
            cde1_element_op(e1_g_m_o_host_result(idx), c1_g_m_o(idx), d1_g_m_o(idx));
        });
    }

    std::string best_op_name;
    float best_ave_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device op instances
    for(auto& op_ptr : op_ptrs)
    {
        auto argument_ptr = op_ptr->MakeArgumentPointer(
            static_cast<A0DataType*>(a0_g_m_k_device_buf.GetDeviceBuffer()),
            static_cast<B0DataType*>(b0_g_k_n_device_buf.GetDeviceBuffer()),
            std::array<const void*, 1>{d0_g_m_n_device_buf.GetDeviceBuffer()},
            static_cast<B1DataType*>(b1_g_n_o_device_buf.GetDeviceBuffer()),
            std::array<const void*, 1>{d1_g_m_o_device_buf.GetDeviceBuffer()},
            static_cast<E1DataType*>(e1_g_m_o_device_buf.GetDeviceBuffer()),
            M,
            N,
            K,
            O,
            BatchCount,
            StrideA0,
            StrideB0,
            std::array<ck::index_t, 1>{StrideD0},
            StrideB1,
            std::array<ck::index_t, 1>{StrideD1},
            StrideE1,
            BatchStrideA0,
            BatchStrideB0,
            std::array<ck::index_t, 1>{BatchStrideD0},
            BatchStrideB1,
            std::array<ck::index_t, 1>{BatchStrideD1},
            BatchStrideE1,
            a0_element_op,
            b0_element_op,
            cde0_element_op,
            b1_element_op,
            cde1_element_op);

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            std::string op_name = op_ptr->GetTypeString();

            float ave_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop = (size_t(M) * N * K * 2 + size_t(M) * N * O * 2) * BatchCount;
            std::size_t num_btype =
                (sizeof(A0DataType) * M * K + sizeof(B0DataType) * K * N + sizeof(D0DataType) * N +
                 sizeof(B1DataType) * N * O + sizeof(E1DataType) * M * O + sizeof(D1DataType) * O) *
                BatchCount;

            float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

            float gb_per_sec = num_btype / 1.E6 / ave_time;

            std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
                      << " GB/s, " << op_name << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_ave_time   = ave_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                e1_g_m_o_device_buf.FromDevice(e1_g_m_o_device_result.mData.data());

                pass = pass & ck::utils::check_err(e1_g_m_o_device_result, e1_g_m_o_host_result);

                if(do_log)
                {
                    LogRangeAsType<float>(
                        std::cout << "e1_g_m_o_host_result : ", e1_g_m_o_host_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(
                        std::cout << "e1_g_m_o_device_result : ", e1_g_m_o_device_result.mData, ",")
                        << std::endl;
                }
            }
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
              << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

    return pass;
}

} // namespace profiler
} // namespace ck
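The default-stride logic at the top of this profiler is worth pulling out: when a caller passes -1, the leading stride of a dense matrix defaults to its column count for row-major layouts and its row count for column-major, and the dense batch stride is the full per-matrix footprint. A stand-alone sketch of that rule (hypothetical helper names, dense case only):

#include <cstdio>

// Dense leading stride: row-major rows are contiguous runs of `cols` elements;
// column-major columns are contiguous runs of `rows` elements.
int default_stride(int rows, int cols, bool row_major) { return row_major ? cols : rows; }

// Dense batch stride: one whole matrix per batch, regardless of layout.
int default_batch_stride(int rows, int cols) { return rows * cols; }

int main()
{
    const int M = 4, N = 8;
    std::printf("row-major stride = %d, batch stride = %d\n",
                default_stride(M, N, true), default_batch_stride(M, N)); // 8, 32
    return 0;
}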
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/profile_batched_gemm_gemm_impl.hpp
ADDED
@@ -0,0 +1,319 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <memory>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_gemm.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/tensor_operation_instance/gpu/batched_gemm_gemm.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

namespace ck {
namespace profiler {

template <typename ADataType,
          typename B0DataType,
          typename B1DataType,
          typename CDataType,
          typename ALayout,
          typename B0Layout,
          typename B1Layout,
          typename CLayout>
bool profile_batched_gemm_gemm_impl(bool do_verification,
                                    int init_method,
                                    bool do_log,
                                    bool time_kernel,
                                    int M,
                                    int N,
                                    int K,
                                    int O,
                                    int BatchCount    = 1,
                                    int StrideA       = -1,
                                    int StrideB0      = -1,
                                    int StrideB1      = -1,
                                    int StrideC       = -1,
                                    int BatchStrideA  = -1,
                                    int BatchStrideB0 = -1,
                                    int BatchStrideB1 = -1,
                                    int BatchStrideC  = -1)

{

    using Row           = tensor_layout::gemm::RowMajor;
    using Col           = tensor_layout::gemm::ColumnMajor;
    using PassThrough   = tensor_operation::element_wise::PassThrough;
    using AElementOp    = PassThrough;
    using B0ElementOp   = PassThrough;
    using B1ElementOp   = PassThrough;
    using Acc0ElementOp = PassThrough;
    using CElementOp    = PassThrough;
    using AccDataType   = float;

    // Ref Gemm0
    using ReferenceGemm0Instance = tensor_operation::host::ReferenceBatchedGemm<ADataType,
                                                                                B0DataType,
                                                                                ADataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                B0ElementOp,
                                                                                CElementOp>;

    // Ref Gemm
    using ReferenceGemm1Instance = tensor_operation::host::ReferenceBatchedGemm<ADataType,
                                                                                B1DataType,
                                                                                CDataType,
                                                                                AccDataType,
                                                                                AElementOp,
                                                                                B1ElementOp,
                                                                                CElementOp>;

    bool pass = true;

    const int DefaultStrideA  = ck::is_same_v<ALayout, Row> ? K : M;
    const int DefaultStrideB0 = ck::is_same_v<B0Layout, Row> ? N : K;
    const int DefaultStrideB1 = ck::is_same_v<B1Layout, Row> ? O : N;
    const int DefaultStrideC  = ck::is_same_v<CLayout, Row> ? O : M;

    StrideA  = (StrideA < 0) ? DefaultStrideA : StrideA;
    StrideB0 = (StrideB0 < 0) ? DefaultStrideB0 : StrideB0;
    StrideB1 = (StrideB1 < 0) ? DefaultStrideB1 : StrideB1;
    StrideC  = (StrideC < 0) ? DefaultStrideC : StrideC;

    const int DefaultBatchStrideA  = (ck::is_same_v<ALayout, Col> ? K : M) * StrideA;
    const int DefaultBatchStrideB0 = (ck::is_same_v<B0Layout, Col> ? N : K) * StrideB0;
    const int DefaultBatchStrideB1 = (ck::is_same_v<B1Layout, Col> ? O : N) * StrideB1;
    const int DefaultBatchStrideC  = (ck::is_same_v<CLayout, Col> ? O : M) * StrideC;

    BatchStrideA  = BatchStrideA < 0 ? DefaultBatchStrideA : BatchStrideA;
    BatchStrideB0 = BatchStrideB0 < 0 ? DefaultBatchStrideB0 : BatchStrideB0;
    BatchStrideB1 = BatchStrideB1 < 0 ? DefaultBatchStrideB1 : BatchStrideB1;
    BatchStrideC  = BatchStrideC < 0 ? DefaultBatchStrideC : BatchStrideC;

    auto f_host_tensor_descriptor = [](std::size_t batch_count,
                                       std::size_t row,
                                       std::size_t col,
                                       std::size_t stride,
                                       std::size_t batch_stride,
                                       auto layout) {
        using namespace ck::literals;

        if(std::is_same<decltype(layout), Row>::value)
        {
            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
        }
        else
        {
            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
        }
    };

    // C_m_o = A_m_k * B0_k_n * B1_n_o
    Tensor<ADataType> a_g_m_k(
        f_host_tensor_descriptor(BatchCount, M, K, StrideA, BatchStrideA, ALayout{}));
    Tensor<B0DataType> b0_g_k_n(
        f_host_tensor_descriptor(BatchCount, K, N, StrideB0, BatchStrideB0, B0Layout{}));
    Tensor<B1DataType> b1_g_n_o(
        f_host_tensor_descriptor(BatchCount, N, O, StrideB1, BatchStrideB1, B1Layout{}));
    Tensor<CDataType> c_g_m_o_host_result(
        f_host_tensor_descriptor(BatchCount, M, O, StrideC, BatchStrideC, CLayout{}));
    Tensor<CDataType> c_g_m_o_device_result(
        f_host_tensor_descriptor(BatchCount, M, O, StrideC, BatchStrideC, CLayout{}));
    // Host verification: Output of Gemm0 is input A of Gemm1
    Tensor<ADataType> acc0_g_m_n(f_host_tensor_descriptor(BatchCount, M, N, N, M * N, Row{}));

    std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
    std::cout << "b0_g_k_n: " << b0_g_k_n.mDesc << std::endl;
    std::cout << "b1_g_n_o: " << b1_g_n_o.mDesc << std::endl;
    std::cout << "c_g_m_o: " << c_g_m_o_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 3});
        b0_g_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-2, 3});
        b1_g_n_o.GenerateTensorValue(GeneratorTensor_2<B1DataType>{-2, 3});
        break;
    case 2:
        a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b0_g_k_n.GenerateTensorValue(GeneratorTensor_3<B0DataType>{0.0, 1.0});
        b1_g_n_o.GenerateTensorValue(GeneratorTensor_3<B1DataType>{-0.5, 0.5});
        break;
    case 3:
        a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
        b0_g_k_n.GenerateTensorValue(GeneratorTensor_Diagonal<B0DataType>{});
        b1_g_n_o.GenerateTensorValue(GeneratorTensor_Diagonal<B1DataType>{});
        break;
    default:
        a_g_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
        b0_g_k_n.GenerateTensorValue(GeneratorTensor_Sequential<1>{});
        b1_g_n_o.GenerateTensorValue(GeneratorTensor_Diagonal<B1DataType>{});
    }

    DeviceMem a_g_m_k_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSize());
    DeviceMem b0_g_k_n_device_buf(sizeof(B0DataType) * b0_g_k_n.mDesc.GetElementSize());
    DeviceMem b1_g_n_o_device_buf(sizeof(B1DataType) * b1_g_n_o.mDesc.GetElementSize());
    DeviceMem c_g_m_o_device_buf(sizeof(CDataType) * c_g_m_o_device_result.mDesc.GetElementSize());

    a_g_m_k_device_buf.ToDevice(a_g_m_k.mData.data());
    b0_g_k_n_device_buf.ToDevice(b0_g_k_n.mData.data());
    b1_g_n_o_device_buf.ToDevice(b1_g_n_o.mData.data());

    auto a_element_op    = AElementOp{};
    auto b0_element_op   = B0ElementOp{};
    auto acc0_element_op = Acc0ElementOp{};
    auto b1_element_op   = B1ElementOp{};
    auto c_element_op    = CElementOp{};

    using DeviceOp = tensor_operation::device::DeviceBatchedGemmGemm<ALayout,
                                                                     B0Layout,
                                                                     B1Layout,
                                                                     CLayout,
                                                                     ADataType,
                                                                     B0DataType,
                                                                     B1DataType,
                                                                     CDataType,
                                                                     AElementOp,
                                                                     B0ElementOp,
                                                                     Acc0ElementOp,
                                                                     B1ElementOp,
                                                                     CElementOp>;

    // get device op instances
    const auto op_ptrs = tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    // early fail when no instances are found
    if(op_ptrs.size() == 0)
    {
        return false;
    }

    if(do_verification)
    {
        auto ref_gemm0          = ReferenceGemm0Instance{};
        auto ref_gemm0_invoker  = ref_gemm0.MakeInvoker();
        auto ref_gemm0_argument = ref_gemm0.MakeArgument(
            a_g_m_k, b0_g_k_n, acc0_g_m_n, a_element_op, b0_element_op, PassThrough{});

        ref_gemm0_invoker.Run(ref_gemm0_argument);

        auto ref_gemm1          = ReferenceGemm1Instance{};
        auto ref_gemm1_invoker  = ref_gemm1.MakeInvoker();
        auto ref_gemm1_argument = ref_gemm1.MakeArgument(
            acc0_g_m_n, b1_g_n_o, c_g_m_o_host_result, PassThrough{}, b1_element_op, c_element_op);

        ref_gemm1_invoker.Run(ref_gemm1_argument);
    }

    std::string best_op_name;
    float best_ave_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device op instances
    for(auto& op_ptr : op_ptrs)
    {
        auto argument_ptr = op_ptr->MakeArgumentPointer(
            static_cast<ADataType*>(a_g_m_k_device_buf.GetDeviceBuffer()),
            static_cast<B0DataType*>(b0_g_k_n_device_buf.GetDeviceBuffer()),
            static_cast<B1DataType*>(b1_g_n_o_device_buf.GetDeviceBuffer()),
            static_cast<CDataType*>(c_g_m_o_device_buf.GetDeviceBuffer()),
            M,
            N,
            K,
            O,
            BatchCount,
            StrideA,
            StrideB0,
            StrideB1,
            StrideC,
            BatchStrideA,
            BatchStrideB0,
            BatchStrideB1,
            BatchStrideC,
            a_element_op,
            b0_element_op,
            acc0_element_op,
            b1_element_op,
            c_element_op);

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            std::string op_name = op_ptr->GetTypeString();

            float ave_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop = (size_t(M) * N * K * 2 + size_t(M) * N * O * 2) * BatchCount;
            std::size_t num_btype = (sizeof(ADataType) * M * K + sizeof(B0DataType) * K * N +
                                     sizeof(B1DataType) * N * O + sizeof(CDataType) * M * O) *
                                    BatchCount;

            float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

            float gb_per_sec = num_btype / 1.E6 / ave_time;

            std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
                      << " GB/s, " << op_name << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_ave_time   = ave_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                c_g_m_o_device_buf.FromDevice(c_g_m_o_device_result.mData.data());

                pass = pass & ck::utils::check_err(c_g_m_o_device_result, c_g_m_o_host_result);

                if(do_log)
                {
                    LogRangeAsType<float>(std::cout << "a_g_m_k: ", a_g_m_k.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(std::cout << "b0_g_k_n : ", b0_g_k_n.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(std::cout << "b1_g_n_o : ", b1_g_n_o.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(
                        std::cout << "c_g_m_o_host_result : ", c_g_m_o_host_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(
                        std::cout << "c_g_m_o_device_result : ", c_g_m_o_device_result.mData, ",")
                        << std::endl;
                }
            }
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
              << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

    return pass;
}

} // namespace profiler
} // namespace ck
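The FLOP accounting these profilers share is simple: each of the two chained GEMMs costs 2*rows*cols*inner multiply-add FLOPs per batch, and dividing total FLOPs by (ave_time_ms * 1e9) yields TFLOPS, since 1 TFLOP/s equals 1e9 FLOP per millisecond. A stand-alone restatement of that arithmetic (hypothetical helper name):

#include <cstdio>

// flop = 2*M*N*K (Gemm0) + 2*M*N*O (Gemm1), per batch; result in TFLOPS.
double tflops_of(long long M, long long N, long long K, long long O,
                 long long batch, double ave_time_ms)
{
    const double flop = 2.0 * M * N * K * batch + 2.0 * M * N * O * batch;
    return flop / 1.E9 / ave_time_ms;
}

int main()
{
    std::printf("%.3f TFLOPS\n", tflops_of(1024, 1024, 64, 64, 16, 0.5));
    return 0;
}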
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/profile_batched_gemm_impl.hpp
ADDED
@@ -0,0 +1,264 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <memory>

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm.hpp"
#include "ck/tensor_operation/gpu/device/device_batched_gemm_multi_d.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"

#include "ck/library/tensor_operation_instance/gpu/batched_gemm.hpp"
#include "ck/library/tensor_operation_instance/gpu/batched_gemm_multi_d.hpp"

#include "ck/library/utility/check_err.hpp"
#include "ck/library/utility/device_memory.hpp"
#include "ck/library/utility/host_tensor.hpp"
#include "ck/library/utility/host_tensor_generator.hpp"
#include "ck/library/utility/literals.hpp"
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"

namespace ck {
namespace profiler {

template <typename ADataType,
          typename BDataType,
          typename CDataType,
          typename ALayout,
          typename BLayout,
          typename CLayout,
          typename AElementOp,
          typename BElementOp,
          typename CElementOp,
          typename DeviceOp>
bool profile_batched_gemm_impl(int do_verification,
                               int init_method,
                               bool do_log,
                               bool time_kernel,
                               int M,
                               int N,
                               int K,
                               int BatchStrideA,
                               int BatchStrideB,
                               int BatchStrideC,
                               int StrideA,
                               int StrideB,
                               int StrideC,
                               int BatchCount)
{
    bool pass = true;

    auto f_host_tensor_descriptor = [](std::size_t batch_count,
                                       std::size_t row,
                                       std::size_t col,
                                       std::size_t stride,
                                       std::size_t batch_stride,
                                       auto layout) {
        using namespace ck::literals;

        if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
        {
            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
        }
        else
        {
            return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
        }
    };

    Tensor<ADataType> a_g_m_k(
        f_host_tensor_descriptor(BatchCount, M, K, StrideA, BatchStrideA, ALayout{}));
    Tensor<BDataType> b_g_k_n(
        f_host_tensor_descriptor(BatchCount, K, N, StrideB, BatchStrideB, BLayout{}));
    Tensor<CDataType> c_g_m_n_host_result(
        f_host_tensor_descriptor(BatchCount, M, N, StrideC, BatchStrideC, CLayout{}));
    Tensor<CDataType> c_g_m_n_device_result(
        f_host_tensor_descriptor(BatchCount, M, N, StrideC, BatchStrideC, CLayout{}));

    std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
    std::cout << "b_g_k_n: " << b_g_k_n.mDesc << std::endl;
    std::cout << "c_g_m_n: " << c_g_m_n_host_result.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break;
    case 1:
        a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_g_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
        break;
    default:
        a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
        b_g_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
    }

    const auto a_element_op = AElementOp{};
    const auto b_element_op = BElementOp{};
    const auto c_element_op = CElementOp{};

    if(do_verification)
    {
        using ReferenceBatchedGemmInstance =
            ck::tensor_operation::host::ReferenceBatchedGemm<ADataType,
                                                             BDataType,
                                                             CDataType,
                                                             float,
                                                             AElementOp,
                                                             BElementOp,
                                                             CElementOp>;

        auto ref_batched_gemm = ReferenceBatchedGemmInstance{};
        auto ref_invoker      = ref_batched_gemm.MakeInvoker();

        auto ref_argument = ref_batched_gemm.MakeArgument(
            a_g_m_k, b_g_k_n, c_g_m_n_host_result, a_element_op, b_element_op, c_element_op);

        ref_invoker.Run(ref_argument);
    }

    DeviceMem a_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_device_buf(sizeof(BDataType) * b_g_k_n.mDesc.GetElementSpaceSize());
    DeviceMem c_device_buf(sizeof(CDataType) * c_g_m_n_device_result.mDesc.GetElementSpaceSize());

    a_device_buf.ToDevice(a_g_m_k.mData.data());
    b_device_buf.ToDevice(b_g_k_n.mData.data());
    c_device_buf.ToDevice(c_g_m_n_device_result.mData.data());

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    std::string best_op_name;
    float best_ave_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device op instances
    for(auto& op_ptr : op_ptrs)
    {
        std::unique_ptr<tensor_operation::device::BaseArgument> argument_ptr;
        // false branch for multi d dl kernel
        if constexpr(std::is_same<
                         DeviceOp,
                         ck::tensor_operation::device::DeviceBatchedGemm<ALayout,
                                                                         BLayout,
                                                                         CLayout,
                                                                         ADataType,
                                                                         BDataType,
                                                                         CDataType,
                                                                         AElementOp,
                                                                         BElementOp,
                                                                         CElementOp>>::value)
        {

            argument_ptr =
                op_ptr->MakeArgumentPointer(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
                                            static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
                                            static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
                                            M,
                                            N,
                                            K,
                                            StrideA,
                                            StrideB,
                                            StrideC,
                                            BatchStrideA,
                                            BatchStrideB,
                                            BatchStrideC,
                                            BatchCount,
                                            ck::tensor_operation::element_wise::PassThrough{},
                                            ck::tensor_operation::element_wise::PassThrough{},
                                            ck::tensor_operation::element_wise::PassThrough{});
        }
        else
        {
            argument_ptr =
                op_ptr->MakeArgumentPointer(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
                                            static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
                                            {},
                                            static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
                                            M,
                                            N,
                                            K,
                                            BatchCount,
                                            StrideA,
                                            StrideB,
                                            {},
                                            StrideC,
                                            BatchStrideA,
                                            BatchStrideB,
                                            {},
                                            BatchStrideC,
                                            ck::tensor_operation::element_wise::PassThrough{},
                                            ck::tensor_operation::element_wise::PassThrough{},
                                            ck::tensor_operation::element_wise::PassThrough{});
        }

        auto invoker_ptr = op_ptr->MakeInvokerPointer();

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            // re-init C to zero before profiling next kernel
            c_device_buf.SetZero();

            std::string op_name = op_ptr->GetTypeString();

            float ave_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop = std::size_t(2) * BatchCount * M * N * K;

            std::size_t num_btype = (sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
                                     sizeof(CDataType) * M * N) *
                                    BatchCount;

            float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

            float gb_per_sec = num_btype / 1.E6 / ave_time;

            std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
                      << " GB/s, " << op_name << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_ave_time   = ave_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                c_device_buf.FromDevice(c_g_m_n_device_result.mData.data());

                pass = pass & ck::utils::check_err(c_g_m_n_device_result, c_g_m_n_host_result);

                if(do_log)
                {
                    LogRangeAsType<float>(std::cout << "a : ", a_g_m_k.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "b: ", b_g_k_n.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "c_host: ", c_g_m_n_host_result.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(
                        std::cout << "c_device: ", c_g_m_n_device_result.mData, ",")
                        << std::endl;
                }
            }
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
              << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;

    return pass;
}

} // namespace profiler
} // namespace ck
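The batched host tensors above all use a 3-D descriptor of the form HostTensorDescriptor({batch, row, col}, {batch_stride, stride, 1}) for row-major data. A stand-alone illustration of the (batch, row, col) to linear-offset mapping that descriptor implies (hypothetical helper, row-major case only):

#include <cstddef>
#include <cstdio>

// offset(g, r, c) = g * batch_stride + r * stride + c for strides
// {batch_stride, stride, 1}, i.e. columns are contiguous within a row.
std::size_t offset_row_major(std::size_t g, std::size_t r, std::size_t c,
                             std::size_t batch_stride, std::size_t stride)
{
    return g * batch_stride + r * stride + c;
}

int main()
{
    // batch 1, element (2, 3) of a dense 4x8 row-major matrix:
    // 1*32 + 2*8 + 3 = 51
    std::printf("%zu\n", offset_row_major(1, 2, 3, 32, 8));
    return 0;
}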
sglang_repo/sgl-kernel/3rdparty/flashinfer/3rdparty/composable_kernels/profiler/include/profiler/profile_batched_gemm_reduce_impl.hpp
ADDED
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+#pragma once
+
+#include "ck/ck.hpp"
+#include "ck/utility/reduction_operator.hpp"
+#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
+#include "ck/tensor_operation/gpu/device/device_gemm_reduce.hpp"
+#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
+
+#include "ck/library/utility/check_err.hpp"
+#include "ck/library/utility/convolution_parameter.hpp"
+#include "ck/library/utility/device_memory.hpp"
+#include "ck/library/utility/host_tensor.hpp"
+#include "ck/library/utility/host_tensor_generator.hpp"
+#include "ck/library/utility/literals.hpp"
+#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
+
+namespace ck {
+namespace tensor_operation {
+namespace device {
+namespace instance {
+
+using F32 = float;
+using F16 = ck::half_t;
+using ReducePtrsGlobal = ck::Tuple<F32*, F32*>;
+using Identity = ck::tensor_operation::element_wise::PassThrough;
+using Square = ck::tensor_operation::element_wise::UnarySquare;
+using ReduceInElementOps = ck::Tuple<Identity, Square>;
+using ReduceOutElementOps = ck::Tuple<Identity, Identity>;
+
+using DeviceGemmReduceNoOpPtr =
+    ck::tensor_operation::device::DeviceGemmReducePtr<0, ReducePtrsGlobal::Size()>;
+
+void add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gmk_gkn_gmn_instances(
+    std::vector<DeviceGemmReduceNoOpPtr>&);
+
+void add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gmk_gnk_gmn_instances(
+    std::vector<DeviceGemmReduceNoOpPtr>&);
+
+void add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gkm_gkn_gmn_instances(
+    std::vector<DeviceGemmReduceNoOpPtr>&);
+
+void add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gkm_gnk_gmn_instances(
+    std::vector<DeviceGemmReduceNoOpPtr>&);
+
+} // namespace instance
+} // namespace device
+} // namespace tensor_operation
+} // namespace ck
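The four declarations above each register a family of pre-generated device kernels for one layout combination. A minimal sketch of how a caller collects and uses them (hypothetical standalone usage, mirroring what the profiler body below does):

    std::vector<ck::tensor_operation::device::instance::DeviceGemmReduceNoOpPtr> gemm_ptrs;
    ck::tensor_operation::device::instance::
        add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gmk_gkn_gmn_instances(
            gemm_ptrs);
    // Each type-erased instance can then be probed with IsSupportedArgument(...)
    // and run through MakeInvokerPointer()->Run(...).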
+
+namespace ck {
+namespace profiler {
+
+template <typename ADataType,
+          typename BDataType,
+          typename CDataType,
+          typename ReduceDataType,
+          typename ALayout,
+          typename BLayout,
+          typename CLayout>
+bool profile_batched_gemm_reduce_impl(int do_verification,
+                                      int init_method,
+                                      bool do_log,
+                                      bool time_kernel,
+                                      int M,
+                                      int N,
+                                      int K,
+                                      int StrideA,
+                                      int StrideB,
+                                      int StrideC,
+                                      int BatchCount)
+{
+    bool pass = true;
+
+    auto f_host_tensor_descriptor = [](std::size_t batch_count,
+                                       std::size_t row,
+                                       std::size_t col,
+                                       std::size_t stride,
+                                       auto layout) {
+        using namespace ck::literals;
+
+        if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
+        {
+            return HostTensorDescriptor({batch_count, row, col}, {row * stride, stride, 1_uz});
+        }
+        else
+        {
+            return HostTensorDescriptor({batch_count, row, col}, {col * stride, 1_uz, stride});
+        }
+    };
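The lambda packs a batched 2-D matrix into a 3-D descriptor whose leading stride is the per-batch size. A worked example (values are illustrative, not taken from the diff):

    // RowMajor, batch_count = 2, row = 3, col = 4, stride = 4:
    //   lengths = {2, 3, 4}, strides = {3 * 4, 4, 1} = {12, 4, 1},
    //   so element (g, m, k) sits at flat offset g * 12 + m * 4 + k.
    auto desc = f_host_tensor_descriptor(2, 3, 4, 4, ck::tensor_layout::gemm::RowMajor{});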
+
+    Tensor<ADataType> a_g_m_k(f_host_tensor_descriptor(BatchCount, M, K, StrideA, ALayout{}));
+    Tensor<BDataType> b_g_k_n(f_host_tensor_descriptor(BatchCount, K, N, StrideB, BLayout{}));
+
+    Tensor<CDataType> c_g_m_n_host_result(
+        f_host_tensor_descriptor(BatchCount, M, N, StrideC, CLayout{}));
+    Tensor<ReduceDataType> d0_g_m_host_result({BatchCount, M});
+    Tensor<ReduceDataType> d1_g_m_host_result({BatchCount, M});
+
+    Tensor<CDataType> c_g_m_n_device_result(
+        f_host_tensor_descriptor(BatchCount, M, N, StrideC, CLayout{}));
+    Tensor<ReduceDataType> d0_g_m_device_result({BatchCount, M});
+    Tensor<ReduceDataType> d1_g_m_device_result({BatchCount, M});
+
+    std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
+    std::cout << "b_g_k_n: " << b_g_k_n.mDesc << std::endl;
+    std::cout << "c_g_m_n: " << c_g_m_n_host_result.mDesc << std::endl;
+    std::cout << "d0_g_m: " << d0_g_m_host_result.mDesc << std::endl;
+    std::cout << "d1_g_m: " << d1_g_m_host_result.mDesc << std::endl;
+
+    std::size_t num_thread = std::thread::hardware_concurrency();
+    switch(init_method)
+    {
+    case 0: break;
+    case 1:
+        std::srand(0);
+        a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5}, num_thread);
+        b_g_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
+        break;
+    default:
+        std::srand(0);
+        a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0}, num_thread);
+        b_g_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
+    }
+
+    using AElementOp = ck::tensor_operation::element_wise::PassThrough;
+    using BElementOp = ck::tensor_operation::element_wise::PassThrough;
+    using CElementOp = ck::tensor_operation::element_wise::PassThrough;
+    using ReduceOp0 = ck::reduce::Add;
+    using ReduceOp1 = ck::reduce::Add;
+    using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
+    using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
+
+    auto a_element_op = AElementOp{};
+    auto b_element_op = BElementOp{};
+    auto c_element_op = CElementOp{};
+    std::array<void*, 3> gemm_element_ops = {&a_element_op, &b_element_op, &c_element_op};
+
+    const auto reduce0_op = ReduceOp0{};
+    const auto reduce1_op = ReduceOp1{};
+
+    auto passthrough = UnaryIdenticElementOp{};
+    auto square = UnarySquareElementOp{};
+    std::array<void*, 2> reduce_in_element_ops = {&passthrough, &square};
+    std::array<void*, 2> reduce_out_element_ops = {&passthrough, &passthrough};
+
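With PassThrough/UnarySquare as the input element-ops and Add as both reduce-ops, the two reductions compute, for every (batch, m) row of C, the sum and the sum of squares over N. A scalar sketch of the same arithmetic (hypothetical c_row buffer, float accumulation assumed):

    float d0 = 0.f, d1 = 0.f;
    for(int n = 0; n < N; ++n)
    {
        const float c = c_row[n]; // one element c(g, m, n)
        d0 += c;                  // PassThrough then Add -> row sum
        d1 += c * c;              // UnarySquare then Add -> row sum of squares
    }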
+    if(do_verification)
+    {
+        using ReferenceBatchedGemmInstance =
+            ck::tensor_operation::host::ReferenceBatchedGemm<ADataType,
+                                                             BDataType,
+                                                             CDataType,
+                                                             float,
+                                                             AElementOp,
+                                                             BElementOp,
+                                                             CElementOp>;
+
+        using ReduceAccDataType = ReduceDataType;
+
+        auto ref_batched_gemm = ReferenceBatchedGemmInstance{};
+        auto ref_invoker = ref_batched_gemm.MakeInvoker();
+
+        auto ref_argument = ref_batched_gemm.MakeArgument(
+            a_g_m_k, b_g_k_n, c_g_m_n_host_result, a_element_op, b_element_op, c_element_op);
+
+        ref_invoker.Run(ref_argument);
+
+        for(int batch = 0; batch < BatchCount; ++batch)
+        {
+            for(int m = 0; m < M; ++m)
+            {
+                auto reduce0_acc = reduce0_op.GetIdentityValue<ReduceAccDataType>();
+                auto reduce1_acc = reduce1_op.GetIdentityValue<ReduceAccDataType>();
+
+                for(int n = 0; n < N; ++n)
+                {
+                    ReduceAccDataType d0_val =
+                        ck::type_convert<ReduceAccDataType>(c_g_m_n_host_result(batch, m, n));
+                    ReduceAccDataType d1_val;
+
+                    square(d1_val, d0_val);
+                    reduce0_op(reduce0_acc, d0_val);
+                    reduce1_op(reduce1_acc, d1_val);
+                }
+
+                d0_g_m_host_result(batch, m) = ck::type_convert<ReduceDataType>(reduce0_acc);
+                d1_g_m_host_result(batch, m) = ck::type_convert<ReduceDataType>(reduce1_acc);
+            }
+        }
+    }
+
+    DeviceMem a_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSpaceSize());
+    DeviceMem b_device_buf(sizeof(BDataType) * b_g_k_n.mDesc.GetElementSpaceSize());
+    DeviceMem c_device_buf(sizeof(CDataType) * c_g_m_n_device_result.mDesc.GetElementSpaceSize());
+    DeviceMem reduce0_device_buf(sizeof(ReduceDataType) *
+                                 d0_g_m_device_result.mDesc.GetElementSpaceSize());
+    DeviceMem reduce1_device_buf(sizeof(ReduceDataType) *
+                                 d1_g_m_device_result.mDesc.GetElementSpaceSize());
+
+    std::array<void*, 2> p_reduces = {reduce0_device_buf.GetDeviceBuffer(),
+                                      reduce1_device_buf.GetDeviceBuffer()};
+
+    a_device_buf.ToDevice(a_g_m_k.mData.data());
+    b_device_buf.ToDevice(b_g_k_n.mData.data());
+
+    // add device GEMM instances
+    std::vector<ck::tensor_operation::device::instance::DeviceGemmReduceNoOpPtr> gemm_ptrs;
+
+    if constexpr(is_same<ADataType, half_t>::value && is_same<BDataType, half_t>::value &&
+                 is_same<CDataType, half_t>::value)
+    {
+        if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value &&
+                     is_same<BLayout, tensor_layout::gemm::RowMajor>::value &&
+                     is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
+        {
+            ck::tensor_operation::device::instance::
+                add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gmk_gkn_gmn_instances(
+                    gemm_ptrs);
+        }
+        else if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value &&
+                          is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value &&
+                          is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
+        {
+            ck::tensor_operation::device::instance::
+                add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gmk_gnk_gmn_instances(
+                    gemm_ptrs);
+        }
+        else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value &&
+                          is_same<BLayout, tensor_layout::gemm::RowMajor>::value &&
+                          is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
+        {
+            ck::tensor_operation::device::instance::
+                add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gkm_gkn_gmn_instances(
+                    gemm_ptrs);
+        }
+        else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value &&
+                          is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value &&
+                          is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
+        {
+            ck::tensor_operation::device::instance::
+                add_device_batched_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_gkm_gnk_gmn_instances(
+                    gemm_ptrs);
+        }
+    }
+
+    if(gemm_ptrs.size() <= 0)
+    {
+        throw std::runtime_error("wrong! no device GEMM instance found");
+    }
+
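The instance-list suffixes encode the storage layouts the branches above match on, reading each tensor's dimensions from slowest- to fastest-varying (decoding derived from the layout checks, not stated in the diff):

    // gmk_gkn_gmn : A[G][M][K] RowMajor,    B[G][K][N] RowMajor    -> first branch
    // gmk_gnk_gmn : A[G][M][K] RowMajor,    B[G][N][K] ColumnMajor -> second branch
    // gkm_gkn_gmn : A[G][K][M] ColumnMajor, B[G][K][N] RowMajor    -> third branch
    // gkm_gnk_gmn : A[G][K][M] ColumnMajor, B[G][N][K] ColumnMajor -> fourth branch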
+    std::string best_gemm_name;
+    float best_ave_time = 0;
+    float best_tflops = 0;
+    float best_gb_per_sec = 0;
+
+    // profile device GEMM instances
+    for(auto& gemm_ptr : gemm_ptrs)
+    {
+        auto argument_ptr = gemm_ptr->MakeArgumentPointer(a_device_buf.GetDeviceBuffer(),
+                                                          b_device_buf.GetDeviceBuffer(),
+                                                          nullptr,
+                                                          {},
+                                                          c_device_buf.GetDeviceBuffer(),
+                                                          p_reduces,
+                                                          M,
+                                                          N,
+                                                          K,
+                                                          StrideA,
+                                                          StrideB,
+                                                          StrideC,
+                                                          {},
+                                                          gemm_element_ops,
+                                                          {},
+                                                          reduce_in_element_ops,
+                                                          reduce_out_element_ops,
+                                                          BatchCount);
+
+        auto invoker_ptr = gemm_ptr->MakeInvokerPointer();
+
+        if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
+        {
+            // init D0, D1 to 0
+            reduce0_device_buf.SetZero();
+            reduce1_device_buf.SetZero();
+
+            float ave_time =
+                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
+
+            std::string gemm_name = gemm_ptr->GetTypeString();
+
+            std::size_t flop = std::size_t(2) * BatchCount * M * N * K;
+            std::size_t num_btype = sizeof(ADataType) * BatchCount * M * K +
+                                    sizeof(BDataType) * BatchCount * K * N +
+                                    sizeof(CDataType) * BatchCount * M * N;
+
+            float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
+
+            float gb_per_sec = num_btype / 1.E6 / ave_time;
+
+            std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
+                      << " GB/s, " << gemm_name << std::endl;
+
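The unit conversions are worth spelling out: ave_time is in milliseconds, so dividing flop by 1.E9 and then by the millisecond count is flop / (1e12 * seconds), i.e. TFLOPS; likewise bytes / 1.E6 / ms is bytes / (1e9 * seconds), i.e. GB/s. A quick numeric check with illustrative values:

    // BatchCount = 4, M = N = 256, K = 128:
    //   flop = 2 * 4 * 256 * 256 * 128 = 67,108,864
    //   if ave_time = 0.1 ms: 67108864 / 1.E9 / 0.1 ~= 0.67 TFLOPS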
+            if(tflops > best_tflops)
+            {
+                best_gemm_name = gemm_name;
+                best_tflops = tflops;
+                best_ave_time = ave_time;
+                best_gb_per_sec = gb_per_sec;
+            }
+
+            if(do_verification)
+            {
+                c_device_buf.FromDevice(c_g_m_n_device_result.mData.data());
+                reduce0_device_buf.FromDevice(d0_g_m_device_result.mData.data());
+                reduce1_device_buf.FromDevice(d1_g_m_device_result.mData.data());
+
+                bool c_error = ck::utils::check_err(c_g_m_n_device_result, c_g_m_n_host_result);
+                bool d0_error = ck::utils::check_err(d0_g_m_device_result, d0_g_m_host_result);
+                bool d1_error = ck::utils::check_err(d1_g_m_device_result, d1_g_m_host_result);
+
+                pass = pass && (c_error == true);
+                pass = pass && (d0_error == true);
+                pass = pass && (d1_error == true);
+
+                if(do_log)
+                {
+                    LogRangeAsType<float>(std::cout << "a : ", a_g_m_k.mData, ",") << std::endl;
+                    LogRangeAsType<float>(std::cout << "b: ", b_g_k_n.mData, ",") << std::endl;
+                    LogRangeAsType<float>(std::cout << "c_host: ", c_g_m_n_host_result.mData, ",")
+                        << std::endl;
+                    LogRangeAsType<float>(
+                        std::cout << "c_device: ", c_g_m_n_device_result.mData, ",")
+                        << std::endl;
+                    LogRangeAsType<float>(std::cout << "d0_host: ", d0_g_m_host_result.mData, ",")
+                        << std::endl;
+                    LogRangeAsType<float>(
+                        std::cout << "d0_device: ", d0_g_m_device_result.mData, ",")
+                        << std::endl;
+                    LogRangeAsType<float>(std::cout << "d1_host: ", d1_g_m_host_result.mData, ",")
+                        << std::endl;
+                    LogRangeAsType<float>(
+                        std::cout << "d1_device: ", d1_g_m_device_result.mData, ",")
+                        << std::endl;
+                }
+            }
+        }
+        else
+        {
+            std::cout << "does not support this GEMM problem" << std::endl;
+        }
+    }
+
+    std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
+              << best_gb_per_sec << " GB/s, " << best_gemm_name << std::endl;
+
+    return pass;
+}
+
+} // namespace profiler
+} // namespace ck
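For reference, a minimal sketch of how this helper might be invoked from a profiler driver; the types, layouts, and problem sizes here are illustrative assumptions, not taken from the diff:

    using Row = ck::tensor_layout::gemm::RowMajor;

    // f16 GEMM with f32 row reductions, A/B/C all row-major, 4 batches.
    bool ok = ck::profiler::profile_batched_gemm_reduce_impl<ck::half_t, // ADataType
                                                             ck::half_t, // BDataType
                                                             ck::half_t, // CDataType
                                                             float,      // ReduceDataType
                                                             Row,        // ALayout
                                                             Row,        // BLayout
                                                             Row>        // CLayout
        (1,             // do_verification
         1,             // init_method: integer fill in [-5, 5]
         false,         // do_log
         true,          // time_kernel
         256, 256, 128, // M, N, K
         128, 256, 256, // StrideA, StrideB, StrideC (row-major: K, N, N)
         4);            // BatchCount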