File_name | CUDA_code
---|---|
reduce1D.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../algorithm/shared_reduce.cuh"
#include "../store/store.cuh"
#include "../reduce.cuh"
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <cmath>
using namespace akg_reduce;
using namespace std;
template <typename T>
void CompareResults(T *arr1, T *arr2, int len) {
double total_err = 0.0;
bool flag = true;
for (auto i = 0; i < len; i++) {
if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])) > 1e-03) {
flag = false;
}
total_err += std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i]));
}
if (flag) {
printf("[CORRECT] Output is equal to Expected.\n");
} else {
printf("[INCORRECT] Output is not equal to Expected\n");
}
printf("Ouput (show few results):\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr1[i]));
}
printf("\n");
printf("Expected:\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr2[i]));
}
printf("AVERAGE_ERROR = %f\n", total_err / (double)len);
printf("\n");
}
template <typename T>
__global__ void ComputeResultSingleThread1D(int x_len, T *arr, T *output) {
// 1D single-thread computation: a Sum function using the Kahan summation algorithm.
// More info in test_kahan.cc.
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto i = 0; i < x_len; i++) {
lower_val = arr[i] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[0] = sum;
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultGPUSingleBlock1D(int x_len, T *arr, T *output, int item_per_thread, ReduceOp op) {
T temp_rf = 0.0;
__shared__ T red_buf[64];
__shared__ T temp_output[1];
temp_output[0] = (T)0.0;
for (int k = 0; k < item_per_thread; ++k) {
if ((int)threadIdx.x + k * blockDim.x < x_len) {
temp_rf += arr[(int)threadIdx.x + k * blockDim.x];
}
}
__syncthreads();
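// Note (assumption from usage here, not from the library docs): AkgReduce's third template
// argument (64) appears to be the number of participating threads, so it must match
// blockDim.x and the red_buf size; ALL_REDUCE reduces the whole block to a single value.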
AkgReduce<T, ReduceOp, 64, ALL_REDUCE>(op, &temp_output[0], red_buf, temp_rf);
__syncthreads();
output[0] = temp_output[0];
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultGPUMultiBlock1D(int x_len, T *arr, T *output, int item_per_thread, ReduceOp op) {
T temp_rf = 0.0;
__shared__ T red_buf[32];
__shared__ T temp_output[1]; // temp storage for output
temp_output[0] = (T)0.0;
for (int k = 0; k < item_per_thread; ++k) {
if (threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread < x_len) {
temp_rf += arr[threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread];
}
}
__syncthreads();
AkgReduce<T, ReduceOp, 32, ALL_REDUCE>(op, &temp_output[0], red_buf, temp_rf);
__syncthreads();
if (threadIdx.x == 0) {
AkgAtomicReturn<T, ReduceOp>(temp_output[0], &output[0], op);
}
}
template <typename T>
void TestReduce1D(int x_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce1D ---\n X = %d, TYPE = %s\n", x_len, type_name.c_str());
int input_bytes = x_len * sizeof(T);
int output_bytes = 1 * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 100) / 100.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i]));
}
printf("\n");
}
h_O[0] = TypeTransform<T, double>(0.0);
expected_h_O[0] = TypeTransform<T, double>(0.0);
// host to device
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice));
// compute single thread results
ComputeResultSingleThread1D<T><<<1, 1>>>(x_len, d_I, expected_d_O);
GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost));
if (single_block) {
// compute GPU single-block results
dim3 gridSize(1);
dim3 blockSize(64);
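// ceiling division: each of the 64 threads accumulates item_per_thread elements
// with stride blockDim.x before the block-wide reduction.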
int item_per_thread = (x_len - 1) / blockSize.x + 1;
ComputeResultGPUSingleBlock1D<T, akg_reduce::SumOp>
<<<gridSize, blockSize>>>(x_len, d_I, d_O, item_per_thread, akg_reduce::SumOp());
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
} else {
// compute GPU multi-block results
dim3 gridSize1(2);
dim3 blockSize1(32);
int item_per_block = (x_len - 1) / gridSize1.x + 1;
int item_per_thread1 = (item_per_block - 1) / blockSize1.x + 1;
ComputeResultGPUMultiBlock1D<T, akg_reduce::SumOp>
<<<gridSize1, blockSize1>>>(x_len, d_I, d_O, item_per_thread1, akg_reduce::SumOp());
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
}
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, 1);
GetGpuErr(cudaFree(expected_d_O));
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
int main() {
TestReduce1D<int>(128, "int", true);
TestReduce1D<half>(128, "half", true);
TestReduce1D<float>(128, "float", true);
TestReduce1D<double>(128, "double", true);
TestReduce1D<int>(128, "int", false);
TestReduce1D<half>(128, "half", false);
TestReduce1D<float>(128, "float", false);
TestReduce1D<double>(128, "double", false);
return 0;
}
|
reduce2D.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../algorithm/shared_reduce.cuh"
#include "../reduce.cuh"
#include "../store/store.cuh"
#include <cstdlib>
#include <ctime>
#include <cstdio>
using namespace akg_reduce;
using namespace std;
template <typename T>
void CompareResults(T *arr1, T *arr2, int len) {
double total_err = 0.0;
bool flag = true;
for (auto i = 0; i < len; i++) {
if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])) > 1e-03) {
flag = false;
}
total_err += std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i]));
}
if (flag) {
printf("[CORRECT] Output is equal to Expected.\n");
} else {
printf("[INCORRECT] Output is not equal to Expected\n");
printf("Ouput (show few results):\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr1[i]));
}
printf("\n");
printf("Expected:\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr2[i]));
}
printf("\n");
}
printf("AVERAGE_ERROR = %f\n", total_err / (double)len);
}
// Kahan summation for the single-thread Sum implementation.
// More info in 'test_kahan.cc'.
template <typename T>
__global__ void ComputeResultAlongXSingleThread(int x_len, int y_len, T *arr, T *output) {
for (auto j = 0; j < y_len; j++) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto i = 0; i < x_len; i++) {
lower_val = arr[i + j * x_len] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[j] = sum;
}
}
template <typename T>
__global__ void ComputeResultAlongYSingleThread(int x_len, int y_len, T *arr, T *output) {
for (auto i = 0; i < x_len; i++) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto j = 0; j < y_len; j++) {
lower_val = arr[i + j * x_len] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[i] = sum;
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongXGPUSingleBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread,
ReduceOp op) {
T T_red_rf = 0.0;
__shared__ T red_buf[32];
__shared__ T temp_output[1]; // temp storage for output
temp_output[0] = (T) 0.0;
for (int k = 0; k < item_per_thread; ++k) {
if (threadIdx.x + k * blockDim.x < x_len && threadIdx.y + blockDim.y * blockIdx.x < y_len) {
T_red_rf += arr[threadIdx.x + k * blockDim.x + threadIdx.y * x_len + blockIdx.y * blockDim.y * x_len];
}
}
__syncthreads();
AkgReduce<T, ReduceOp, 32, REDUCE2D_X>(op, &temp_output[0], red_buf, T_red_rf);
__syncthreads();
if (threadIdx.x == 0) {
output[blockIdx.y * blockDim.y + threadIdx.y] = temp_output[0];
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongXGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread,
ReduceOp op) {
T T_red_rf = 0.0;
__shared__ T red_buf[32];
__shared__ T temp_output[1]; // temp storage for output
temp_output[0] = (T) 0.0;
for (int k = 0; k < item_per_thread; ++k) {
if (threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread < x_len &&
threadIdx.y + blockDim.y * blockIdx.y < y_len) {
T_red_rf += arr[threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread + threadIdx.y * x_len +
blockIdx.y * blockDim.y * x_len];
}
}
__syncthreads();
AkgReduce<T, ReduceOp, 32, REDUCE2D_X>(op, &temp_output[0], red_buf, T_red_rf);
__syncthreads();
if (threadIdx.x == 0) {
AkgAtomicReturn<T, ReduceOp>(temp_output[0], &output[blockIdx.y * blockDim.y + threadIdx.y], op);
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongYGPUSingleBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread,
ReduceOp op, int sharedmem_x) {
T T_red_rf = 0.0;
__shared__ T red_buf[32];
__shared__ T temp_output[1]; // temp storage for output, size is blockDim.x
temp_output[0] = (T) 0.0;
for (int k = 0; k < item_per_thread; ++k) {
if (threadIdx.x + blockIdx.x * blockDim.x < x_len && threadIdx.y + blockDim.y * k < y_len) {
T_red_rf += arr[threadIdx.x + blockIdx.x * blockDim.x + threadIdx.y * x_len + k * blockDim.y * x_len];
}
}
__syncthreads();
AkgReduce<T, ReduceOp, 32, REDUCE2D_Y>(op, &temp_output[threadIdx.x], red_buf, T_red_rf, sharedmem_x);
__syncthreads();
if (threadIdx.y == 0) {
AkgAtomicReturn<T, ReduceOp>(temp_output[threadIdx.x], &output[blockIdx.x * blockDim.x + threadIdx.x], op);
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongYGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread,
ReduceOp op, int sharedmem_x) {
T T_red_rf = 0.0;
__shared__ T red_buf[32];
__shared__ T temp_output[1]; // temp storage for output, size is blockDim.x
temp_output[0] = (T) 0.0;
for (int k = 0; k < item_per_thread; ++k) {
if (threadIdx.x + blockIdx.x * blockDim.x < x_len &&
threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread < y_len) {
T_red_rf += arr[threadIdx.x + blockIdx.x * blockDim.x + threadIdx.y * x_len + k * blockDim.y * x_len +
blockIdx.y * blockDim.y * item_per_thread * x_len];
}
}
__syncthreads();
AkgReduce<T, ReduceOp, 32, REDUCE2D_Y>(op, &temp_output[threadIdx.x], red_buf, T_red_rf, sharedmem_x);
__syncthreads();
if (threadIdx.y == 0) {
AkgAtomicReturn<T, ReduceOp>(temp_output[threadIdx.x], &output[blockIdx.x * blockDim.x + threadIdx.x], op);
}
}
template <typename T>
void TestReduce2DAlongX(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce2DAlongX ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str());
int input_bytes = x_len * y_len * sizeof(T);
int output_bytes = y_len * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len * y_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto j = 0; j < y_len; j++) {
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len]));
}
printf("\n");
}
}
for (auto i = 0; i < y_len; i++) {
h_O[i] = TypeTransform<T, double>(0.0);
expected_h_O[i] = TypeTransform<T, double>(0.0);
}
// host to device
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice));
// compute single thread results
ComputeResultAlongXSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O);
GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost));
if (single_block) {
// compute GPU results
dim3 gridSize(1, 8);
dim3 blockSize(32, 1);
int item_per_thread = (x_len - 1) / blockSize.x + 1;
ComputeResultAlongXGPUSingleBlock<T, akg_reduce::SumOp>
<<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread, akg_reduce::SumOp());
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, y_len);
} else {
dim3 gridSize(2, 8);
dim3 blockSize(32, 1);
int item_per_block = (x_len - 1) / gridSize.x + 1;
int item_per_thread = (item_per_block - 1) / blockSize.x + 1;
ComputeResultAlongXGPUMultiBlock<T, akg_reduce::SumOp>
<<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread, akg_reduce::SumOp());
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, y_len);
}
GetGpuErr(cudaFree(expected_d_O));
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
template <typename T>
void TestReduce2DAlongY(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce2DAlongY ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str());
int input_bytes = x_len * y_len * sizeof(T);
int output_bytes = x_len * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len * y_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto j = 0; j < y_len; j++) {
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len]));
}
printf("\n");
}
}
for (auto i = 0; i < x_len; i++) {
h_O[i] = TypeTransform<T, double>(0.0);
expected_h_O[i] = TypeTransform<T, double>(0.0);
}
// host to device
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice));
// compute single thread results
ComputeResultAlongYSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O);
GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost));
if (single_block) {
// compute GPU results
dim3 gridSize(8, 1);
dim3 blockSize(1, 32);
int item_per_thread = (y_len - 1) / blockSize.y + 1;
int sharedmem_x = 1;
ComputeResultAlongYGPUSingleBlock<T, akg_reduce::SumOp>
<<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread, akg_reduce::SumOp(), sharedmem_x);
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
} else {
// compute GPU results
dim3 gridSize(8, 2);
dim3 blockSize(1, 32);
int item_per_block = (y_len - 1) / gridSize.y + 1;
int item_per_thread = (item_per_block - 1) / blockSize.y + 1;
int sharedmem_x = 1;
ComputeResultAlongYGPUMultiBlock<T, akg_reduce::SumOp>
<<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread, akg_reduce::SumOp(), sharedmem_x);
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
}
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, x_len);
GetGpuErr(cudaFree(expected_d_O));
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
int main() {
TestReduce2DAlongX<int>(128, 8, "int", true);
TestReduce2DAlongX<half>(128, 8, "half", true);
TestReduce2DAlongX<float>(128, 8, "float", true);
TestReduce2DAlongX<double>(128, 8, "double", true);
TestReduce2DAlongX<int>(128, 8, "int", false);
TestReduce2DAlongX<float>(128, 8, "float", false);
TestReduce2DAlongX<double>(128, 8, "double", false);
TestReduce2DAlongY<int>(8, 128, "int", true);
TestReduce2DAlongY<half>(8, 128, "half", true);
TestReduce2DAlongY<float>(8, 128, "float", true);
TestReduce2DAlongY<double>(8, 128, "double", true);
TestReduce2DAlongY<int>(8, 128, "int", false);
TestReduce2DAlongY<half>(8, 128, "half", false);
TestReduce2DAlongY<float>(8, 128, "float", false);
TestReduce2DAlongY<double>(8, 128, "double", false);
return 0;
}
|
atomic.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../operators/reduce_operators.cuh"
#include <cstdio>
using namespace akg_reduce;
using namespace std;
// check whether the op-atomic transformation is correct.
// compile code: nvcc test_atomic.cu -arch=sm_70
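// Note (assumption based on the operator identifiers used below): AtomicOp<T, identifier>
// presumably dispatches to the matching CUDA atomic (atomicAdd/atomicMax/atomicMin, or a
// CAS loop for types without native support); these tests only check the numeric results.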
template <typename T>
__global__ void AtomicTestSum(T *dest, T val) {
SumOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
__global__ void AtomicTestMax(T *dest, T val) {
MaxOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
__global__ void AtomicTestMin(T *dest, T val) {
MinOp<T> op;
AtomicOp<T, op.identifier> atomic_op;
atomic_op.Compute(&dest[threadIdx.x], val);
}
template <typename T>
void TestAtomicSum() {
cout << "TestAtomicSum" << endl;
int items = 1000;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
if (sizeof(T) == 2) {
h_a[i] = __float2half(0.0);
} else {
h_a[i] = 0.0;
}
}
GetGpuErr(cudaMalloc((void **)&d_a, bytes));
GetGpuErr(cudaMemcpy((void *)d_a, (void *)h_a, bytes, cudaMemcpyHostToDevice));
dim3 grid(1000);
dim3 block(1000);
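// 1000 blocks x 1000 threads: for each element i, exactly one thread per block adds 1.0,
// so every h_a[i] should read back as 1000.0 if the atomic sum is correct
// (1000 and all intermediate integer sums are exactly representable even in half precision).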
AtomicTestSum<T><<<grid, block>>>(d_a, 1.0);
GetGpuErr(cudaPeekAtLastError());
GetGpuErr(cudaMemcpy((void *)h_a, (void *)d_a, bytes, cudaMemcpyDeviceToHost));
for (auto i = 0; i < 10; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%f ", tmp);
}
printf("\n");
GetGpuErr(cudaFree(d_a));
free(h_a);
}
template <typename T>
void TestAtomicMax() {
cout << "TestAtomicMax" << endl;
int items = 10;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
if (sizeof(T) == 2) {
h_a[i] = __float2half(i);
} else {
h_a[i] = i;
}
}
GetGpuErr(cudaMalloc((void **)&d_a, bytes));
GetGpuErr(cudaMemcpy((void *)d_a, (void *)h_a, bytes, cudaMemcpyHostToDevice));
double val = 1.234567891012345;
dim3 grid(10000);
dim3 block(items);
AtomicTestMax<T><<<grid, block>>>(d_a, val);
GetGpuErr(cudaPeekAtLastError());
GetGpuErr(cudaMemcpy((void *)h_a, (void *)d_a, bytes, cudaMemcpyDeviceToHost));
for (auto i = 0; i < 5; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%.12f ", tmp);
}
printf("\n");
GetGpuErr(cudaFree(d_a));
free(h_a);
}
template <typename T>
void TestAtomicMin() {
cout << "TestAtomicMin" << endl;
int items = 10;
int bytes = items * sizeof(T);
T *h_a, *d_a;
h_a = (T *)malloc(bytes);
for (auto i = 0; i < items; i++) {
h_a[i] = __float2half(i);
}
GetGpuErr(cudaMalloc((void **)&d_a, bytes));
GetGpuErr(cudaMemcpy((void *)d_a, (void *)h_a, bytes, cudaMemcpyHostToDevice));
double val = 1.234567891012345;
dim3 grid(10000);
dim3 block(items);
AtomicTestMin<T><<<grid, block>>>(d_a, val);
GetGpuErr(cudaPeekAtLastError());
GetGpuErr(cudaMemcpy((void *)h_a, (void *)d_a, bytes, cudaMemcpyDeviceToHost));
for (auto i = 0; i < 5; i++) {
double tmp;
if (sizeof(T) == 2) {
tmp = __half2float(h_a[i]);
} else {
tmp = h_a[i];
}
printf("%.12f ", tmp);
}
printf("\n");
GetGpuErr(cudaFree(d_a));
free(h_a);
}
int main() {
TestAtomicSum<float>();
TestAtomicSum<double>();
TestAtomicSum<half>();
TestAtomicMax<float>();
TestAtomicMax<double>();
TestAtomicMax<half>();
TestAtomicMin<float>();
TestAtomicMin<double>();
TestAtomicMin<half>();
return 0;
}
|
dectect.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
using namespace akg_reduce;
using namespace std;
int main() {
int items = 100;
int bytes = items * sizeof(float);
float *h_I, *d_I;
h_I = (float *)malloc(bytes);
GetGpuErr(cudaMalloc((void **)&d_I, bytes));
// check if GetGpuErr can detect and return properly.
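// The copy below intentionally requests bytes + 99, more than was allocated on the device,
// so GetGpuErr is expected to catch and report a CUDA error here.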
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, bytes + 99, cudaMemcpyHostToDevice));
GetGpuErr(cudaFree(d_I));
free(h_I);
return 0;
}
|
reduce1D_logical.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../algorithm/shared_reduce.cuh"
#include "../store/store.cuh"
#include "../reduce.cuh"
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <cmath>
using namespace akg_reduce;
using namespace std;
template <typename T>
void CompareResults(T *arr1, T *arr2, int len) {
double total_err = 0.0;
bool flag = true;
for (auto i = 0; i < len; i++) {
if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])) > 1e-03) {
flag = false;
}
total_err += std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i]));
}
if (flag) {
printf("[CORRECT] Output is equal to Expected.\n");
} else {
printf("[INCORRECT] Output is not equal to Expected\n");
}
printf("Ouput (show few results):\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr1[i]));
}
printf("\n");
printf("Expected:\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr2[i]));
}
printf("AVERAGE_ERROR = %f\n", total_err / (double)len);
printf("\n");
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultSingleThread1D(int x_len, T *arr, T *output, ReduceOp op) {
bool result;
if (ReduceOp::identifier == 3) {
result = true; // and
} else {
result = false; // or
}
for (auto i = 0; i < x_len; i++) {
result = op(result, arr[i]);
}
output[0] = result;
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultGPUSingleBlock1D(int x_len, T *arr, T *output, int item_per_thread, ReduceOp op) {
T temp_rf;
if (ReduceOp::identifier == 3) {
temp_rf = true; // and
} else {
temp_rf = false; // or
}
__shared__ T red_buf[64];
__shared__ T temp_output[1];
temp_output[0] = temp_rf;
for (int k = 0; k < item_per_thread; ++k) {
if ((int)threadIdx.x + k * blockDim.x < x_len) {
temp_rf = op(temp_rf, arr[(int)threadIdx.x + k * blockDim.x]);
}
}
__syncthreads();
AkgReduce<T, ReduceOp, 64, ALL_REDUCE>(op, &temp_output[0], red_buf, temp_rf);
__syncthreads();
output[0] = temp_output[0];
}
template <typename T, typename ReduceOp>
void TestReduce1D(int x_len, string type_name, ReduceOp op, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce1D ---\n X = %d, TYPE = %s\n", x_len, type_name.c_str());
int input_bytes = x_len * sizeof(T);
int output_bytes = 1 * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len; i++) {
h_I[i] = (rand()%2);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto i = 0; i < x_len; i++) {
printf(h_I[i] ? "true " : "false ");
}
printf("\n");
}
h_O[0] = TypeTransform<T, double>(0.0);
expected_h_O[0] = TypeTransform<T, double>(0.0);
// host to device
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice));
// compute single thread results
ComputeResultSingleThread1D<T, ReduceOp><<<1, 1>>>(x_len, d_I, expected_d_O, op);
GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost));
// compute GPU single-block results, only support single block in reduce-axis
dim3 gridSize(1);
dim3 blockSize(64);
int item_per_thread = (x_len - 1) / blockSize.x + 1;
ComputeResultGPUSingleBlock1D<T, ReduceOp><<<gridSize, blockSize>>>(x_len, d_I, d_O, item_per_thread, op);
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, 1);
GetGpuErr(cudaFree(expected_d_O));
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
int main() {
TestReduce1D<bool, akg_reduce::AndOp>(128, "and", akg_reduce::AndOp(), true);
TestReduce1D<bool, akg_reduce::OrOp>(128, "or", akg_reduce::OrOp(), true);
return 0;
}
|
reduce2D_multi_acc.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../algorithm/shared_reduce.cuh"
#include "../reduce.cuh"
#include "../store/store.cuh"
#include <cstdlib>
#include <ctime>
#include <cstdio>
using namespace akg_reduce;
using namespace std;
// File to test reduction with multiple aggregated values per thread,
// including single-block/multi-block reduction along the x and y directions.
template <typename T>
void CompareResults(T *arr1, T *arr2, int len) {
double total_err = 0.0;
bool flag = true;
for (auto i = 0; i < len; i++) {
if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])) > 1e-03) {
flag = false;
}
total_err += std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i]));
}
if (flag) {
printf("[CORRECT] Output is equal to Expected.\n");
} else {
printf("[INCORRECT] Output is not equal to Expected\n");
printf("Ouput (show few results):\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr1[i]));
}
printf("\n");
printf("Expected:\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr2[i]));
}
printf("\n");
}
printf("AVERAGE_ERROR = %f\n", total_err / (double)len);
}
// Kahan summation for the single-thread Sum implementation.
// More info in 'test_kahan.cc'.
template <typename T>
__global__ void ComputeResultAlongXSingleThread(int x_len, int y_len, T *arr, T *output) {
for (auto j = 0; j < y_len; j++) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto i = 0; i < x_len; i++) {
lower_val = arr[i + j * x_len] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[j] = sum;
}
}
template <typename T>
__global__ void ComputeResultAlongYSingleThread(int x_len, int y_len, T *arr, T *output) {
for (auto i = 0; i < x_len; i++) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto j = 0; j < y_len; j++) {
lower_val = arr[i + j * x_len] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[i] = sum;
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongXGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread_x,
int item_per_thread_y, ReduceOp op) {
T T_red_rf[4]; // size must be an explicit constant matching item_per_thread_y: 16384 = 4096 (gridDim.y) * 1 (blockDim.y) * 4 (items per thread)
__shared__ T red_buf[4][1024];
__shared__ T temp_output[4]; // temp storage for output
for (int i = 0; i < 4; ++i) {
temp_output[i] = (T)0.0;
}
for (int i = 0; i < item_per_thread_y; ++i) {
T_red_rf[i] = 0.0;
for (int k = 0; k < item_per_thread_x; ++k) {
if (threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread_x < x_len &&
threadIdx.y + i * blockDim.y + blockIdx.y * blockDim.y * item_per_thread_y < y_len) {
T_red_rf[i] += arr[threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread_x +
(threadIdx.y + i * blockDim.y + blockIdx.y * blockDim.y * item_per_thread_y) * x_len];
}
}
}
__syncthreads();
for (int i = 0; i < item_per_thread_y; ++i) {
AkgReduce<T, ReduceOp, 1024, REDUCE2D_X>(op, &temp_output[i * blockDim.y + 0], &red_buf[i][0], T_red_rf[i]);
}
__syncthreads();
if (threadIdx.x == 0) {
for (int i = 0; i < item_per_thread_y; ++i) {
AkgAtomicReturn<T, ReduceOp>(
temp_output[i], &output[blockIdx.y * blockDim.y * item_per_thread_y + i * blockDim.y + threadIdx.y], op);
}
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongYGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread_x,
int item_per_thread_y, ReduceOp op, int sharedmem_x) {
T T_red_rf[4];
__shared__ T red_buf[4 * 1024];
__shared__ T temp_output[32 * 4];
for (int i = 0; i < 32 * 4; ++i) {
temp_output[i] = (T)0.0;
}
for (int i = 0; i < item_per_thread_x; ++i) { // x is non-reduce-axis
T_red_rf[i] = 0.0;
for (int k = 0; k < item_per_thread_y; ++k) { // here y is reduce-axis
if (threadIdx.x + blockDim.x * i + blockIdx.x * blockDim.x * item_per_thread_x < x_len &&
threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread_y < y_len) {
T_red_rf[i] += arr[threadIdx.x + blockDim.x * i + blockIdx.x * blockDim.x * item_per_thread_x +
(threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread_y) * x_len]; // row stride is x_len
}
}
}
__syncthreads();
for (int i = 0; i < item_per_thread_x; ++i) {
AkgReduce<T, ReduceOp, 32, REDUCE2D_Y>(op, &temp_output[i * blockDim.x + threadIdx.x], &red_buf[i * 1024],
T_red_rf[i], sharedmem_x);
}
__syncthreads();
if (threadIdx.y == 0) {
for (int i = 0; i < item_per_thread_x; ++i) {
AkgAtomicReturn<T, ReduceOp>(temp_output[i * blockDim.x + threadIdx.x],
&output[blockIdx.x * blockDim.x * item_per_thread_x + blockDim.x * i + threadIdx.x],
op);
}
}
}
template <typename T>
void TestReduce2DAlongX(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce2DAlongX ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str());
int input_bytes = x_len * y_len * sizeof(T);
int output_bytes = y_len * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len * y_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto j = 0; j < y_len; j++) {
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len]));
}
printf("\n");
}
}
for (auto i = 0; i < y_len; i++) {
h_O[i] = TypeTransform<T, double>(0.0);
expected_h_O[i] = TypeTransform<T, double>(0.0);
}
// host to device
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice));
// compute single thread results
ComputeResultAlongXSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O);
GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost));
dim3 gridSize(8, 4096);
dim3 blockSize(1024, 1);
int item_per_block_x = (x_len - 1) / gridSize.x + 1;
int item_per_thread_x = (item_per_block_x - 1) / blockSize.x + 1;
int item_per_block_y = (y_len - 1) / gridSize.y + 1;
int item_per_thread_y = (item_per_block_y - 1) / blockSize.y + 1;
ComputeResultAlongXGPUMultiBlock<T, akg_reduce::SumOp>
<<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread_x, item_per_thread_y, akg_reduce::SumOp());
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, y_len);
GetGpuErr(cudaFree(expected_d_O));
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
template <typename T>
void TestReduce2DAlongY(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce2DAlongY ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str());
int input_bytes = x_len * y_len * sizeof(T);
int output_bytes = x_len * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len * y_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto j = 0; j < y_len; j++) {
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len]));
}
printf("\n");
}
}
for (auto i = 0; i < x_len; i++) {
h_O[i] = TypeTransform<T, double>(0.0);
expected_h_O[i] = TypeTransform<T, double>(0.0);
}
// host to device
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice));
// compute single thread results
ComputeResultAlongYSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O);
GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost));
dim3 gridSize(128, 128);
dim3 blockSize(32, 32);
int item_per_block_x = (x_len - 1) / gridSize.x + 1;
int item_per_thread_x = (item_per_block_x - 1) / blockSize.x + 1;
int item_per_block_y = (y_len - 1) / gridSize.y + 1;
int item_per_thread_y = (item_per_block_y - 1) / blockSize.y + 1;
int sharedmem_x = 32;
ComputeResultAlongYGPUMultiBlock<T, akg_reduce::SumOp><<<gridSize, blockSize>>>(
x_len, y_len, d_I, d_O, item_per_thread_x, item_per_thread_y, akg_reduce::SumOp(), sharedmem_x);
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, x_len);
GetGpuErr(cudaFree(expected_d_O));
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
int main() {
// TestReduce2DAlongX<int>(128, 8, "int", true);
// TestReduce2DAlongX<half>(128, 8, "half", true);
// TestReduce2DAlongX<float>(128, 8, "float", true);
// TestReduce2DAlongX<double>(128, 8, "double", true);
// TestReduce2DAlongX<int>(128, 8, "int", false);
TestReduce2DAlongX<float>(16384, 16384, "float", false);
// TestReduce2DAlongX<double>(128, 8, "double", false);
// TestReduce2DAlongY<int>(8, 128, "int", true);
// TestReduce2DAlongY<half>(8, 128, "half", true);
// TestReduce2DAlongY<float>(8, 128, "float", true);
// TestReduce2DAlongY<double>(8, 128, "double", true);
// TestReduce2DAlongY<int>(8, 128, "int", false);
// TestReduce2DAlongY<half>(8, 128, "half", false);
TestReduce2DAlongY<float>(16384, 16384, "float", false);
// TestReduce2DAlongY<double>(8, 128, "double", false);
return 0;
}
|
kahan.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <cstdlib>
#include <ctime>
#include "../reduce.cuh"
using namespace std;
using namespace akg_reduce;
// This implementation shows the difference between
// 'Kahan summation algorithm' and 'Direct summation'
// See more in https://en.wikipedia.org/wiki/Kahan_summation_algorithm
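// Illustrative example of the effect being compensated: in float, 16777216.0f + 1.0f
// rounds back to 16777216.0f (2^24 + 1 is not representable), so a long direct sum
// silently drops low-order bits once the running total is large; Kahan's low_bits
// term carries the dropped part forward so it is not lost.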
template <typename T>
T ComputeKahanCPU(T *arr, int len) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto i = 0; i < len; i++) {
lower_val = arr[i] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
return sum;
}
template <typename T>
T ComputeDirectCPU(T *arr, int len) {
T sum = 0.0;
for (auto i = 0; i < len; i++) {
sum += arr[i];
}
return sum;
}
template<typename T>
__global__ void ComputeKahanAdd(int len,T *arr, T* output){
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto i = 0; i < len; i ++){
AkgKahanAdd(sum, arr[i], low_bits, lower_val, cropped_sum);
}
output[0] = sum;
}
template <typename T>
T TestKahanGPU(T *arr, int len) {
int input_bytes = len * sizeof(T);
int output_bytes = 1 * sizeof(T);
T *d_I, *h_O, *d_O;
h_O = (T *)malloc(output_bytes);
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)arr, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
ComputeKahanAdd<T><<<1, 1>>>(len, d_I, d_O);
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
T ans = h_O[0];
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(h_O);
return ans;
}
int main() {
srand(time(0));
float arr[1000000];
for (auto i = 0; i < 1000000; i++) {
arr[i] = (float)(rand() % 1000000) / 1000000.0;
}
printf("Kahan result: %f\n", ComputeKahanCPU<float>(arr, 1000000));
printf("Direct result: %f\n", ComputeDirectCPU<float>(arr, 1000000));
printf("Kahan result in GPU: %f\n", TestKahanGPU<float>(arr, 1000000));
return 0;
}
|
select.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "./utils/util.cuh"
#include <typeinfo>
using namespace akg_reduce;
using namespace std;
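// Note (assumption from the usage below; see util.cuh for the actual definition):
// Select<cond, A, B>::Type appears to behave like std::conditional, yielding A when cond
// is true and B otherwise, so 'atype' should be float and 'btype' double here.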
int main() {
typedef typename Select<false, double, float>::Type atype;
atype a = 1.0;
cout << typeid(a).name() << endl;
typedef typename Select<(1 > 0), double, float>::Type btype;
btype b = 1.0;
cout << typeid(b).name() << endl;
return 0;
}
|
sgemm_v3.cu | // optimize sgemm
#include <stdio.h>
#include <stdlib.h>
#include "assert.h"
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
// calculate offset from row, col and ld; in a row-major matrix, ld is the width (leading dimension) of the matrix
#define OFFSET(row, col, ld) ((row) * (ld) + (col))
// reinterpret four consecutive floats as a float4 for vectorized load/store
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
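// e.g. FETCH_FLOAT4(dst[0]) = FETCH_FLOAT4(src[idx]) moves four consecutive floats with a
// single 128-bit load/store; this assumes the addresses are 16-byte aligned.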
#define checkCudaErrors(func) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
printf ("%s %d CUDA: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
}
// K: ldA
// N: ldB
template <
const int BLOCK_SIZE_M, // height of block of C that each thread block calculate
const int BLOCK_SIZE_K, // width of block of A that each thread block load into shared memory
const int BLOCK_SIZE_N, // width of block of C that each thread block calculate
const int THREAD_SIZE_Y, // height of block of C that each thread calculate
const int THREAD_SIZE_X, // width of block of C that each thread calculate
const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not
>
__global__ void Sgemm(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
const int M,
const int N,
const int K) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// the threads number in Block of X,Y
const int THREAD_X_PER_BLOCK = BLOCK_SIZE_N / THREAD_SIZE_X;
const int THREAD_Y_PER_BLOCK = BLOCK_SIZE_M / THREAD_SIZE_Y;
const int THREAD_NUM_PER_BLOCK = THREAD_X_PER_BLOCK * THREAD_Y_PER_BLOCK;
// thread id in cur Block
const int tid = ty * THREAD_X_PER_BLOCK + tx;
// shared memory
__shared__ float As[2][BLOCK_SIZE_K][BLOCK_SIZE_M];
__shared__ float Bs[2][BLOCK_SIZE_K][BLOCK_SIZE_N];
// registers for C
float accum[THREAD_SIZE_Y][THREAD_SIZE_X];
#pragma unroll
for(int i=0; i<THREAD_SIZE_Y; i++){
#pragma unroll
for(int j=0; j<THREAD_SIZE_X; j++){
accum[i][j]=0.0;
}
}
// registers for A and B
float frag_a[2][THREAD_SIZE_Y];
float frag_b[2][THREAD_SIZE_X];
// registers load global memory
const int ldg_num_a = BLOCK_SIZE_M * BLOCK_SIZE_K / (THREAD_NUM_PER_BLOCK * 4);
const int ldg_num_b = BLOCK_SIZE_K * BLOCK_SIZE_N / (THREAD_NUM_PER_BLOCK * 4);
float ldg_a_reg[4*ldg_num_a];
float ldg_b_reg[4*ldg_num_b];
// threads number in one row
const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4;
const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4;
// row number and col number that needs to be loaded by this thread
const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW;
const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW;
const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4;
const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4;
// row stride that thread uses to load multiple rows of a tile
const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW;
const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW;
A = &A[(BLOCK_SIZE_M * by)* K];
B = &B[BLOCK_SIZE_N * bx];
//load index of the tile
const int warp_id = tid / 32;
const int lane_id = tid % 32;
const int a_tile_index = warp_id/2*16 + lane_id/8*4; //warp_id * 8 + (lane_id / 16)*4; // (warp_id/4)*32 + ((lane_id%16)/2)*4;
const int b_tile_index = warp_id%2*32 + lane_id%8*4; //(lane_id % 16) * 4; // (warp_id%4)*16 + (lane_id/16)*8 + (lane_id%2)*4;
//transfer first tile from global mem to shared mem
// load A from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
int ldg_index = i / A_TILE_ROW_STRIDE * 4;
FETCH_FLOAT4(ldg_a_reg[ldg_index]) = FETCH_FLOAT4(A[OFFSET(
A_TILE_ROW_START + i, // row
A_TILE_COL, // col
K )]);
As[0][A_TILE_COL][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index];
As[0][A_TILE_COL+1][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+1];
As[0][A_TILE_COL+2][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+2];
As[0][A_TILE_COL+3][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+3];
}
// load B from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
FETCH_FLOAT4(Bs[0][B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET(
B_TILE_ROW_START + i, // row
B_TILE_COL, // col
N )]);
}
__syncthreads();
// load A from shared memory to register
FETCH_FLOAT4(frag_a[0][0]) = FETCH_FLOAT4(As[0][0][a_tile_index]);
FETCH_FLOAT4(frag_a[0][4]) = FETCH_FLOAT4(As[0][0][a_tile_index + 64]);
// load B from shared memory to register
FETCH_FLOAT4(frag_b[0][0]) = FETCH_FLOAT4(Bs[0][0][b_tile_index]);
FETCH_FLOAT4(frag_b[0][4]) = FETCH_FLOAT4(Bs[0][0][b_tile_index + 64]);
int write_stage_idx = 1;
int tile_idx = 0;
do{
// next tile index
tile_idx += BLOCK_SIZE_K;
// load next tile from global mem
if(tile_idx< K){
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
int ldg_index = i / A_TILE_ROW_STRIDE * 4;
FETCH_FLOAT4(ldg_a_reg[ldg_index]) = FETCH_FLOAT4(A[OFFSET(
A_TILE_ROW_START + i, // row
A_TILE_COL + tile_idx, // col
K )]);
}
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
int ldg_index = i / B_TILE_ROW_STRIDE * 4;
FETCH_FLOAT4(ldg_b_reg[ldg_index]) = FETCH_FLOAT4(B[OFFSET(
tile_idx + B_TILE_ROW_START + i, // row
B_TILE_COL, // col
N )]);
}
}
int load_stage_idx = write_stage_idx ^ 1;
#pragma unroll
for(int j=0; j<BLOCK_SIZE_K - 1; ++j){
// load next tile from shared mem to register
// load A from shared memory to register
FETCH_FLOAT4(frag_a[(j+1)%2][0]) = FETCH_FLOAT4(As[load_stage_idx][(j+1)][a_tile_index]);
FETCH_FLOAT4(frag_a[(j+1)%2][4]) = FETCH_FLOAT4(As[load_stage_idx][(j+1)][a_tile_index + 64]);
// load B from shared memory to register
FETCH_FLOAT4(frag_b[(j+1)%2][0]) = FETCH_FLOAT4(Bs[load_stage_idx][(j+1)][b_tile_index]);
FETCH_FLOAT4(frag_b[(j+1)%2][4]) = FETCH_FLOAT4(Bs[load_stage_idx][(j+1)][b_tile_index + 64]);
// compute C THREAD_SIZE_X x THREAD_SIZE_Y
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
accum[thread_y][thread_x] += frag_a[j%2][thread_y] * frag_b[j%2][thread_x];
}
}
}
if(tile_idx < K){
// load A from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
int ldg_index = i / A_TILE_ROW_STRIDE * 4;
As[write_stage_idx][A_TILE_COL][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index];
As[write_stage_idx][A_TILE_COL+1][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+1];
As[write_stage_idx][A_TILE_COL+2][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+2];
As[write_stage_idx][A_TILE_COL+3][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+3];
}
// load B from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
int ldg_index = i / B_TILE_ROW_STRIDE * 4;
FETCH_FLOAT4(Bs[write_stage_idx][B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(ldg_b_reg[ldg_index]);
}
// use double buffer, only need one sync
__syncthreads();
// switch
write_stage_idx ^= 1;
}
// load first tile from shared mem to register of next iter
// load A from shared memory to register
FETCH_FLOAT4(frag_a[0][0]) = FETCH_FLOAT4(As[load_stage_idx^1][0][a_tile_index]);
FETCH_FLOAT4(frag_a[0][4]) = FETCH_FLOAT4(As[load_stage_idx^1][0][a_tile_index + 64]);
// load B from shared memory to register
FETCH_FLOAT4(frag_b[0][0]) = FETCH_FLOAT4(Bs[load_stage_idx^1][0][b_tile_index]);
FETCH_FLOAT4(frag_b[0][4]) = FETCH_FLOAT4(Bs[load_stage_idx^1][0][b_tile_index + 64]);
// compute C THREAD_SIZE_X x THREAD_SIZE_Y
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
accum[thread_y][thread_x] += frag_a[1][thread_y] * frag_b[1][thread_x];
}
}
}while(tile_idx< K);
const int c_block_row = a_tile_index;
const int c_block_col = b_tile_index;
//store C00 block
for(int i=0; i<4; i++){
FETCH_FLOAT4(C[OFFSET(
BLOCK_SIZE_M * by + c_block_row + i,
BLOCK_SIZE_N * bx + c_block_col,
N)]) = FETCH_FLOAT4(accum[i][0]);
}
//store C01 block
for(int i=0; i<4; i++){
FETCH_FLOAT4(C[OFFSET(
BLOCK_SIZE_M * by + c_block_row + i,
BLOCK_SIZE_N * bx + c_block_col + 64,
N)]) = FETCH_FLOAT4(accum[i][4]);
}
//store C10 block
for(int i=0; i<4; i++){
FETCH_FLOAT4(C[OFFSET(
BLOCK_SIZE_M * by + c_block_row + 64 + i,
BLOCK_SIZE_N * bx + c_block_col,
N)]) = FETCH_FLOAT4(accum[i+4][0]);
}
//store C11 block
for(int i=0; i<4; i++){
FETCH_FLOAT4(C[OFFSET(
BLOCK_SIZE_M * by + c_block_row + 64 + i,
BLOCK_SIZE_N * bx + c_block_col + 64,
N)]) = FETCH_FLOAT4(accum[i+4][4]);
}
}
int main(int argc, char** argv) {
if (argc != 4) {
printf("usage: ./main [M] [K] [N]\n");
exit(0);
}
size_t M = atoi(argv[1]);
size_t K = atoi(argv[2]);
size_t N = atoi(argv[3]);
assert( M%8 == 0);
assert( N%8 == 0);
assert( K%8 == 0);
size_t bytes_A = sizeof(float) * M * K;
size_t bytes_B = sizeof(float) * K * N;
size_t bytes_C = sizeof(float) * M * N;
float* h_A = (float*)malloc(bytes_A);
float* h_B = (float*)malloc(bytes_B);
float* h_C = (float*)malloc(bytes_C);
float* h_C1 = (float*)malloc(bytes_C);
float* d_A;
float* d_B;
float* d_C;
checkCudaErrors(cudaMalloc(&d_A, bytes_A));
checkCudaErrors(cudaMalloc(&d_B, bytes_B));
checkCudaErrors(cudaMalloc(&d_C, bytes_C));
double msecPerMatrixMul[2] = {0, 0};
double gigaFlops[2] = {0, 0};
double flopsPerMatrixMul = 2.0 * M * N * K;
// do not edit: the kernel's hard-coded offsets assume exactly these tile sizes
const int BLOCK_SIZE_M = 128;
const int BLOCK_SIZE_K = 8;
const int BLOCK_SIZE_N = 128;
const int THREAD_SIZE_X = 8;
const int THREAD_SIZE_Y = 8;
const bool ENABLE_DOUBLE_BUFFER = false;
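// With these parameters each block computes a 128x128 tile of C using a 16x16 = 256-thread
// block, and each thread accumulates an 8x8 sub-tile of C in registers (written back as four 4x4 blocks).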
// generate data for A
for( int i = 0; i < M * K; i++ ) {
h_A[i] = i / 13;
}
// generate data for B
for( int i = 0; i < K * N; i++ ) {
h_B[i] = i % 13;
}
checkCudaErrors(cudaMemcpy( d_A, h_A, bytes_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_B, h_B, bytes_B, cudaMemcpyHostToDevice));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
float msecTotal = 0;
int nIter = 1000;
checkCudaErrors(cudaMemcpy( d_C, h_C, bytes_C, cudaMemcpyHostToDevice));
checkCudaErrors(cudaEventRecord(start));
for (int run = 0 ; run < nIter; run ++ ) {
dim3 dimBlock(BLOCK_SIZE_N / THREAD_SIZE_X, BLOCK_SIZE_M / THREAD_SIZE_Y);
dim3 dimGrid(N / BLOCK_SIZE_N, M / BLOCK_SIZE_M);
Sgemm<BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N, THREAD_SIZE_Y, THREAD_SIZE_X, ENABLE_DOUBLE_BUFFER>
<<< dimGrid, dimBlock >>>(d_A, d_B, d_C, M, N, K);
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(cudaMemcpy( h_C, d_C, bytes_C, cudaMemcpyDeviceToHost));
msecPerMatrixMul[0] = msecTotal / nIter;
gigaFlops[0] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[0] / 1000.0f);
printf( "My gemm Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n",
gigaFlops[0],
msecPerMatrixMul[0],
flopsPerMatrixMul);
// cublas
cublasHandle_t blas_handle;
cublasCreate(&blas_handle);
float alpha = 1.0;
float beta = 0;
checkCudaErrors(cudaMemcpy( d_C, h_C, bytes_C, cudaMemcpyHostToDevice));
checkCudaErrors(cudaEventRecord(start));
for (int run = 0 ; run < nIter; run ++ ) {
cublasSgemm (blas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
M, N, K, &alpha,
d_A, K, d_B, N, &beta, d_C, N
);
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(cudaMemcpy( h_C1, d_C, bytes_C, cudaMemcpyDeviceToHost));
msecPerMatrixMul[1] = msecTotal / nIter;
gigaFlops[1] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[1] / 1000.0f);
printf( "CuBlas Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n",
gigaFlops[1],
msecPerMatrixMul[1],
flopsPerMatrixMul);
cublasDestroy(blas_handle);
double eps = 1.e-6; // machine zero
bool correct = true;
for (int i = 0; i < M * N; i++) {
int row = i / N;
int col = i % N;
double abs_err = fabs(h_C[i] - h_C1[col * M + row]);
double dot_length = M;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%d][%d]=%.8f, ref=%.8f error term is > %E\n",
row, col, h_C[i], h_C1[col * M + row], eps);
correct = false;
break;
}
}
printf("%s\n", correct ? "Result= PASS" : "Result= FAIL");
printf("ratio= %f\n", gigaFlops[0] / gigaFlops[1]);
// Free Memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
free(h_C1);
}
|
reduce_v4_unroll_last_warp.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
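// warpReduce below relies on the classic warp-synchronous trick: the last 32 active threads
// belong to a single warp, so the __syncthreads() calls are dropped and the shared buffer is
// marked volatile to force memory accesses. This pattern is only safe on pre-Volta GPUs;
// with independent thread scheduling, __syncwarp() (or warp shuffles) should be used instead.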
__device__ void warpReduce(volatile float* cache, unsigned int tid){
cache[tid]+=cache[tid+32];
//__syncthreads();
cache[tid]+=cache[tid+16];
//__syncthreads();
cache[tid]+=cache[tid+8];
//__syncthreads();
cache[tid]+=cache[tid+4];
//__syncthreads();
cache[tid]+=cache[tid+2];
//__syncthreads();
cache[tid]+=cache[tid+1];
//__syncthreads();
}
__global__ void reduce4(float *d_in,float *d_out){
__shared__ float sdata[THREAD_PER_BLOCK];
// each thread loads two elements from global memory and pre-adds them into shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
sdata[tid] = d_in[i] + d_in[i + blockDim.x];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>32; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid < 32) warpReduce(sdata, tid);
if (tid == 0) d_out[blockIdx.x] = sdata[0];
}
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *d_a;
cudaMalloc((void **)&d_a,N*sizeof(float));
int NUM_PER_BLOCK = 2*THREAD_PER_BLOCK;
int block_num = N / NUM_PER_BLOCK;
float *out=(float *)malloc(block_num*sizeof(float));
float *d_out;
cudaMalloc((void **)&d_out,block_num*sizeof(float));
float *res=(float *)malloc(block_num*sizeof(float));
for(int i=0;i<N;i++){
a[i]=1;
}
for(int i=0;i<block_num;i++){
float cur=0;
for(int j=0;j<NUM_PER_BLOCK;j++){
cur+=a[i * NUM_PER_BLOCK + j];
}
res[i]=cur;
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( block_num, 1);
dim3 Block( THREAD_PER_BLOCK, 1);
reduce4<<<Grid,Block>>>(d_a,d_out);
cudaMemcpy(out,d_out,block_num*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,block_num))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<block_num;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_out);
}
|
reduce_v6_multi_add.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
template <unsigned int blockSize>
__device__ void warpReduce(volatile float* cache, unsigned int tid){
if (blockSize >= 64)cache[tid]+=cache[tid+32];
if (blockSize >= 32)cache[tid]+=cache[tid+16];
if (blockSize >= 16)cache[tid]+=cache[tid+8];
if (blockSize >= 8)cache[tid]+=cache[tid+4];
if (blockSize >= 4)cache[tid]+=cache[tid+2];
if (blockSize >= 2)cache[tid]+=cache[tid+1];
}
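// reduce6: each thread first accumulates NUM_PER_THREAD elements (spaced
// blockSize apart) from global memory into its shared-memory slot, then the
// block runs a fully unrolled tree reduction and the final warp finishes in
// warpReduce without further synchronization.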
template <unsigned int blockSize, int NUM_PER_THREAD>
__global__ void reduce6(float *d_in,float *d_out, unsigned int n){
__shared__ float sdata[blockSize];
// each thread loads NUM_PER_THREAD element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockSize * NUM_PER_THREAD) + threadIdx.x;
sdata[tid] = 0;
#pragma unroll
for(int iter=0; iter<NUM_PER_THREAD; iter++){
sdata[tid] += d_in[i+iter*blockSize];
}
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) {
sdata[tid] += sdata[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
sdata[tid] += sdata[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
sdata[tid] += sdata[tid + 64];
}
__syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) d_out[blockIdx.x] = sdata[0];
}
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *d_a;
cudaMalloc((void **)&d_a,N*sizeof(float));
const int block_num = 1024;
const int NUM_PER_BLOCK = N / block_num;
const int NUM_PER_THREAD = NUM_PER_BLOCK/THREAD_PER_BLOCK;
float *out=(float *)malloc(block_num*sizeof(float));
float *d_out;
cudaMalloc((void **)&d_out,block_num*sizeof(float));
float *res=(float *)malloc(block_num*sizeof(float));
for(int i=0;i<N;i++){
a[i]=i%456;
}
for(int i=0;i<block_num;i++){
float cur=0;
for(int j=0;j<NUM_PER_BLOCK;j++){
if(i * NUM_PER_BLOCK + j < N){
cur+=a[i * NUM_PER_BLOCK + j];
}
}
res[i]=cur;
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( block_num, 1);
dim3 Block( THREAD_PER_BLOCK, 1);
reduce6<THREAD_PER_BLOCK, NUM_PER_THREAD><<<Grid,Block>>>(d_a, d_out, N);
cudaMemcpy(out,d_out,block_num*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,block_num))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<block_num;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_out);
}
|
Sgemv_v2.cu | #include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <math.h>
// cal offset from row col and ld , in row-major matrix, ld is the width of the matrix
#define OFFSET(row, col, ld) ((row) * (ld) + (col))
// transfer float4
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
#define checkCudaErrors(func) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
printf ("%s %d CUDA: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
}
template <unsigned int WarpSize>
__device__ __forceinline__ float warpReduceSum(float sum) {
if (WarpSize >= 32)sum += __shfl_down_sync(0xffffffff, sum, 16); // 0-16, 1-17, 2-18, etc.
if (WarpSize >= 16)sum += __shfl_down_sync(0xffffffff, sum, 8);// 0-8, 1-9, 2-10, etc.
if (WarpSize >= 8)sum += __shfl_down_sync(0xffffffff, sum, 4);// 0-4, 1-5, 2-6, etc.
if (WarpSize >= 4)sum += __shfl_down_sync(0xffffffff, sum, 2);// 0-2, 1-3, 4-6, 5-7, etc.
if (WarpSize >= 2)sum += __shfl_down_sync(0xffffffff, sum, 1);// 0-1, 2-3, 4-5, etc.
return sum;
}
// if N <= 16
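// Each 32-thread warp is split into ROW_PER_WARP groups of kWarp_size =
// 32 / ROW_PER_WARP lanes; every group handles one row of A, each lane reads
// one element of the row and of x, and the partial products are combined with
// a sub-warp shuffle reduction before the group's first lane writes y[row].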
template <
const int ROW_PER_WARP
>
__global__ void Sgemv_v2(
float * __restrict__ A,
float * __restrict__ x,
float * __restrict__ y,
const int M,
const int N) {
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
const int warp_size=32;
int laneId= tx % warp_size;
int current_warp_row = (blockDim.y * bx + ty) * ROW_PER_WARP;
const int kWarp_size = warp_size / ROW_PER_WARP;
int kLaneId = laneId % kWarp_size;
int current_thread_row = current_warp_row + laneId / kWarp_size;
if(current_thread_row < M){
float res=0;
int current_col = kLaneId;
res += A[current_thread_row * N + current_col] * x[current_col];
res = warpReduceSum<kWarp_size>(res);
if(kLaneId==0) y[current_thread_row]=res;
}
}
int main(int argc, char** argv) {
if (argc != 3) {
printf("usage: ./main [M] [N]\n");
exit(0);
}
size_t M = atoi(argv[1]);
size_t N = atoi(argv[2]);
size_t bytes_A = sizeof(float) * M * N;
size_t bytes_x = sizeof(float) * N;
size_t bytes_y = sizeof(float) * M;
float* h_A = (float*)malloc(bytes_A);
float* h_x = (float*)malloc(bytes_x);
float* h_y = (float*)malloc(bytes_y);
float* h_y1 = (float*)malloc(bytes_y);
float* d_A;
float* d_x;
float* d_y;
checkCudaErrors(cudaMalloc(&d_A, bytes_A));
checkCudaErrors(cudaMalloc(&d_x, bytes_x));
checkCudaErrors(cudaMalloc(&d_y, bytes_y));
const int WARP_SIZE=32;
const int ROW_PER_WARP=2;
const int THREAD_PER_BLOCK=128;
const int WARP_PER_BLOCK=THREAD_PER_BLOCK/WARP_SIZE;
const int ROW_PER_BLOCK=WARP_PER_BLOCK * ROW_PER_WARP;
    // generate data for matrix A
for( int i = 0; i < M * N; i++ ) {
h_A[i] = (float)i/N;
}
    // generate data for vector x
for( int i = 0; i < N; i++ ) {
h_x[i] = 1;
}
memset(h_y,0,M*sizeof(float));
memset(h_y1,0,M*sizeof(float));
int nIter = 1000;
checkCudaErrors(cudaMemcpy( d_A, h_A, bytes_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_x, h_x, bytes_x, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_y, h_y, bytes_y, cudaMemcpyHostToDevice));
for (int run = 0 ; run < nIter; run ++ ) {
dim3 dimGrid(M/ROW_PER_BLOCK);
dim3 dimBlock(32,THREAD_PER_BLOCK/WARP_SIZE);
Sgemv_v2<ROW_PER_WARP><<< dimGrid, dimBlock >>>(d_A, d_x, d_y, M, N);
}
checkCudaErrors(cudaMemcpy( h_y, d_y, bytes_y, cudaMemcpyDeviceToHost));
// cublas
cublasHandle_t blas_handle;
cublasCreate(&blas_handle);
float alpha = 1.0;
float beta = 0;
checkCudaErrors(cudaMemcpy( d_y, h_y1, bytes_y, cudaMemcpyHostToDevice));
for (int run = 0 ; run < nIter; run ++ ) {
cublasSgemv (blas_handle, CUBLAS_OP_T,
N, M, &alpha,
d_A, N, d_x, 1, &beta, d_y, 1
);
}
checkCudaErrors(cudaMemcpy( h_y1, d_y, bytes_y, cudaMemcpyDeviceToHost));
cublasDestroy(blas_handle);
double eps = 1.e-6; // machine zero
bool correct = true;
for (int i = 0; i < M; i++) {
double abs_err = fabs(h_y[i] - h_y1[i]);
double dot_length = M;
double abs_val = fabs(h_y[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_y[i], h_y1[i], eps);
correct = false;
break;
}
}
printf("%s\n", correct ? "Result= PASS" : "Result= FAIL");
// Free Memory
cudaFree(d_A);
cudaFree(d_x);
cudaFree(d_y);
free(h_A);
free(h_x);
free(h_y);
free(h_y1);
}
|
elementwise_add.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
// transfer vector
#define FETCH_FLOAT2(pointer) (reinterpret_cast<float2*>(&(pointer))[0])
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
__global__ void add(float* a, float* b, float* c)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
c[idx] = a[idx] + b[idx];
}
__global__ void vec2_add(float* a, float* b, float* c)
{
int idx = (threadIdx.x + blockIdx.x * blockDim.x)*2;
//c[idx] = a[idx] + b[idx];
float2 reg_a = FETCH_FLOAT2(a[idx]);
float2 reg_b = FETCH_FLOAT2(b[idx]);
float2 reg_c;
reg_c.x = reg_a.x + reg_b.x;
reg_c.y = reg_a.y + reg_b.y;
FETCH_FLOAT2(c[idx]) = reg_c;
}
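// vec4_add: every thread handles four consecutive floats through a single
// float4 load/store, so the grid only needs N / (THREAD_PER_BLOCK * 4) blocks;
// this assumes the buffers are 16-byte aligned and N is a multiple of 4.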
__global__ void vec4_add(float* a, float* b, float* c)
{
int idx = (threadIdx.x + blockIdx.x * blockDim.x)*4;
//c[idx] = a[idx] + b[idx];
float4 reg_a = FETCH_FLOAT4(a[idx]);
float4 reg_b = FETCH_FLOAT4(b[idx]);
float4 reg_c;
reg_c.x = reg_a.x + reg_b.x;
reg_c.y = reg_a.y + reg_b.y;
reg_c.z = reg_a.z + reg_b.z;
reg_c.w = reg_a.w + reg_b.w;
FETCH_FLOAT4(c[idx]) = reg_c;
}
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *b=(float *)malloc(N*sizeof(float));
float *out=(float *)malloc(N*sizeof(float));
float *d_a;
float *d_b;
float *d_out;
cudaMalloc((void **)&d_a,N*sizeof(float));
cudaMalloc((void **)&d_b,N*sizeof(float));
cudaMalloc((void **)&d_out,N*sizeof(float));
float *res=(float *)malloc(N*sizeof(float));
for(int i=0;i<N;i++){
a[i]=1;
b[i]=i;
res[i]=a[i]+b[i];
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( N/THREAD_PER_BLOCK/4, 1);
dim3 Block( THREAD_PER_BLOCK, 1);
int iter = 2000;
for(int i=0; i<iter; i++){
vec4_add<<<Grid,Block>>>(d_a, d_b, d_out);
}
cudaMemcpy(out,d_out,N*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,N))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<N;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
}
|
spmv.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime_api.h>
#include <cusparse.h>
using namespace std;
#define checkCudaErrors(func) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
printf ("%s %d CUDA: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
}
#define CHECK_CUDA(func) \
{ \
cudaError_t status = (func); \
if (status != cudaSuccess) { \
printf("CUDA API failed at line %d with error: %s (%d)\n", \
__LINE__, cudaGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
#define CHECK_CUSPARSE(func) \
{ \
cusparseStatus_t status = (func); \
if (status != CUSPARSE_STATUS_SUCCESS) { \
printf("CUSPARSE API failed at line %d with error: %s (%d)\n", \
__LINE__, cusparseGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
void add(int a, int b, float c,
int *h, int *e, int *ne, float *w, int &idx)
{
e[idx] = b, w[idx] = c, ne[idx] = h[a], h[a] = idx++;
}
void readVerEdges(int &is_weighted, int &n, int &t, int &m, std::string &file)
{
std::ifstream input;
input.open("matrix/" + file + ".mtx");
while (input.peek() == '%')
input.ignore(2048, '\n');
input >> n >> t >> m;
std::string str;
input.ignore();
getline(input, str);
int cnt =0;
for(auto c:str){
if(c==' '){
cnt++;
}
}
if(cnt == 1){
is_weighted = 0;
}
else if(cnt == 2){
is_weighted = 1;
}
else{
std::cout<<"error! you need to get right mtx input\n";
exit(0);
}
input.close();
}
void readMtxFile(int is_weighted, int n, int m,
int *row_offset, int *col_index, float *val,
std::string &file)
{
ifstream input;
input.open("matrix/" + file + ".mtx");
while (input.peek() == '%')
input.ignore(2048, '\n');
int t;
input >> n >> t >> m;
int *h = (int *)malloc((n + 10) * sizeof(int));
memset(h, -1, sizeof(int) * (n + 10));
int *e = (int *)malloc((m + 10) * sizeof(int));
int *ne = (int *)malloc((m + 10) * sizeof(int));
float *w = (float *)malloc((m + 10) * sizeof(float));
int idx = 0;
int a, b;
double c;
srand((int)time(0));
if(is_weighted == 0){
while (input >> a >> b)
{
a--;
b--;
c = a%13;
float tc = static_cast<float>(c);
add(a, b, tc, h, e, ne, w, idx);
}
}
else if(is_weighted == 1){
while (input >> a >> b >> c)
{
a--;
b--;
float tc = static_cast<float>(c);
add(a, b, tc, h, e, ne, w, idx);
}
}
else{
std::cout<<"error! you need to get right mtx input\n";
exit(0);
}
row_offset[0] = 0;
int nnz_num = 0;
for (int i = 0; i < n; i++)
{
int count = 0;
for (int j = h[i]; j != -1; j = ne[j])
{
count++;
int nextNode = e[j];
float nextWeight = w[j];
col_index[nnz_num] = nextNode;
val[nnz_num] = nextWeight;
nnz_num++;
}
row_offset[i + 1] = row_offset[i] + count;
}
input.close();
free(h);
free(e);
free(ne);
free(w);
}
template <unsigned int WarpSize>
__device__ __forceinline__ float warpReduceSum(float sum) {
if (WarpSize >= 32)sum += __shfl_down_sync(0xffffffff, sum, 16); // 0-16, 1-17, 2-18, etc.
if (WarpSize >= 16)sum += __shfl_down_sync(0xffffffff, sum, 8);// 0-8, 1-9, 2-10, etc.
if (WarpSize >= 8)sum += __shfl_down_sync(0xffffffff, sum, 4);// 0-4, 1-5, 2-6, etc.
if (WarpSize >= 4)sum += __shfl_down_sync(0xffffffff, sum, 2);// 0-2, 1-3, 4-6, 5-7, etc.
if (WarpSize >= 2)sum += __shfl_down_sync(0xffffffff, sum, 1);// 0-1, 2-3, 4-5, etc.
return sum;
}
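// CSR-vector SpMV: THREADS_PER_VECTOR consecutive threads cooperate on one
// row, striding over its nonzeros, and their partial sums are combined with a
// sub-warp shuffle reduction; the first lane of each group writes y[row].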
template <typename IndexType, typename ValueType, unsigned int VECTORS_PER_BLOCK, unsigned int THREADS_PER_VECTOR>
__global__ void My_spmv_csr_kernel(const IndexType row_num,
const IndexType * A_row_offset,
const IndexType * A_col_index,
const ValueType * A_value,
const ValueType * x,
ValueType * y)
{
const IndexType THREADS_PER_BLOCK = VECTORS_PER_BLOCK * THREADS_PER_VECTOR;
const IndexType thread_id = THREADS_PER_BLOCK * blockIdx.x + threadIdx.x; // global thread index
const IndexType thread_lane = threadIdx.x & (THREADS_PER_VECTOR - 1); // thread index within the vector
const IndexType row_id = thread_id / THREADS_PER_VECTOR; // global vector index
if(row_id < row_num){
const IndexType row_start = A_row_offset[row_id]; //same as: row_start = Ap[row];
const IndexType row_end = A_row_offset[row_id+1];
// initialize local sum
ValueType sum = 0;
// accumulate local sums
for(IndexType jj = row_start + thread_lane; jj < row_end; jj += THREADS_PER_VECTOR)
sum += A_value[jj] * x[ A_col_index[jj] ];
sum = warpReduceSum<THREADS_PER_VECTOR>(sum);
if (thread_lane == 0){
y[row_id] = sum;
}
}
}
template<typename T>
void vec_print(vector<T> array){
for(auto x: array){
cout<<x<<" ";
}
cout<<std::endl;
}
template <typename IndexType, typename ValueType>
void spmv_cpu_kernel(vector<IndexType> &row_offset,
vector<IndexType> &col_index,
vector<ValueType> &value,
vector<ValueType> &x,
vector<ValueType> &y,
IndexType row_num)
{
for(int i=0; i<row_num; i++){
ValueType res = 0;
IndexType num = row_offset[i+1] - row_offset[i];
for(int j=0; j<num; j++){
IndexType index = row_offset[i] + j;
res += value[index]*x[col_index[index]];
}
y[i] = res;
}
}
int main(int argc, char **argv)
{
if (argc != 3) {
printf("usage: ./spmv -f [matrix]\n");
exit(0);
}
string file;
for (int i = 1; i < argc; i++)
{
if (strcmp(argv[i], "-f") == 0)
{
file = argv[i + 1];
}
}
// read mtx file and convert to csr
int is_weighted = -1;
int row_num;
int col_num;
int nnz_num;
readVerEdges(is_weighted, row_num, col_num, nnz_num, file);
vector<int> row_offset(row_num + 1);
vector<int> col_index(nnz_num);
vector<float> value(nnz_num);
vector<float> x(col_num,1.0);
vector<float> y(row_num);
vector<float> y_res(row_num);
vector<float> y_cusparse_res(row_num);
int iter = 2000;
readMtxFile(is_weighted, row_num, nnz_num, row_offset.data(), col_index.data(), value.data(), file);
// check input
// std::cout<<" The row_offset is: "<<std::endl;
// vec_print<int>(row_offset);
// std::cout<<" The col_index is: "<<std::endl;
// vec_print<int>(col_index);
// std::cout<<" The value is: "<<std::endl;
// vec_print<float>(value);
// allocate memory in GPU device
int* d_A_row_offset;
int* d_A_col_index;
float* d_A_value;
float* d_x;
float* d_y;
float* d_y_cusparse;
checkCudaErrors(cudaMalloc(&d_A_row_offset, (row_num + 1)*sizeof(int)));
checkCudaErrors(cudaMalloc(&d_A_col_index, nnz_num*sizeof(int)));
checkCudaErrors(cudaMalloc(&d_A_value, nnz_num*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_x, col_num*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_y, row_num*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_y_cusparse, row_num*sizeof(float)));
checkCudaErrors(cudaMemcpy( d_A_row_offset, row_offset.data(), (row_num + 1)*sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_A_col_index, col_index.data(), nnz_num*sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_A_value, value.data(), nnz_num*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_x, x.data(), col_num*sizeof(float), cudaMemcpyHostToDevice));
// spmv
    // choose THREADS_PER_VECTOR (2..32 threads per row) from the average number of nonzeros per row
int mean_col_num = (nnz_num + (row_num - 1))/ row_num;
std::cout<< "The average col num is: "<< mean_col_num << std::endl;
// const int THREADS_PER_VECTOR = 32;
// const unsigned int VECTORS_PER_BLOCK = 256 / THREADS_PER_VECTOR;
// const unsigned int THREADS_PER_BLOCK = VECTORS_PER_BLOCK * THREADS_PER_VECTOR;
// const unsigned int NUM_BLOCKS = static_cast<unsigned int>((row_num + (VECTORS_PER_BLOCK - 1)) / VECTORS_PER_BLOCK);
// My_spmv_csr_kernel<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR> <<<NUM_BLOCKS, THREADS_PER_BLOCK>>>
// (row_num, d_A_row_offset, d_A_col_index, d_A_value, d_x, d_y);
for(int i=0; i<iter; i++){
if(mean_col_num <= 2){
const int THREADS_PER_VECTOR = 2;
const unsigned int VECTORS_PER_BLOCK = 128;
const unsigned int NUM_BLOCKS = static_cast<unsigned int>((row_num + (VECTORS_PER_BLOCK - 1)) / VECTORS_PER_BLOCK);
My_spmv_csr_kernel<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR> <<<NUM_BLOCKS, 256>>>
(row_num, d_A_row_offset, d_A_col_index, d_A_value, d_x, d_y);
}
else if(mean_col_num > 2 && mean_col_num <= 4){
const int THREADS_PER_VECTOR = 4;
const unsigned int VECTORS_PER_BLOCK = 64;
const unsigned int NUM_BLOCKS = static_cast<unsigned int>((row_num + (VECTORS_PER_BLOCK - 1)) / VECTORS_PER_BLOCK);
My_spmv_csr_kernel<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR> <<<NUM_BLOCKS, 256>>>
(row_num, d_A_row_offset, d_A_col_index, d_A_value, d_x, d_y);
}
else if(mean_col_num > 4 && mean_col_num <= 8){
const int THREADS_PER_VECTOR = 8;
const unsigned int VECTORS_PER_BLOCK = 32;
const unsigned int NUM_BLOCKS = static_cast<unsigned int>((row_num + (VECTORS_PER_BLOCK - 1)) / VECTORS_PER_BLOCK);
My_spmv_csr_kernel<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR> <<<NUM_BLOCKS, 256>>>
(row_num, d_A_row_offset, d_A_col_index, d_A_value, d_x, d_y);
}
else if(mean_col_num > 8 && mean_col_num <= 16){
const int THREADS_PER_VECTOR = 16;
const unsigned int VECTORS_PER_BLOCK = 16;
const unsigned int NUM_BLOCKS = static_cast<unsigned int>((row_num + (VECTORS_PER_BLOCK - 1)) / VECTORS_PER_BLOCK);
My_spmv_csr_kernel<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR> <<<NUM_BLOCKS, 256>>>
(row_num, d_A_row_offset, d_A_col_index, d_A_value, d_x, d_y);
}
else if(mean_col_num > 16){
const int THREADS_PER_VECTOR = 32;
const unsigned int VECTORS_PER_BLOCK = 8;
const unsigned int NUM_BLOCKS = static_cast<unsigned int>((row_num + (VECTORS_PER_BLOCK - 1)) / VECTORS_PER_BLOCK);
My_spmv_csr_kernel<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR> <<<NUM_BLOCKS, 256>>>
(row_num, d_A_row_offset, d_A_col_index, d_A_value, d_x, d_y);
}
}
checkCudaErrors(cudaMemcpy(y.data(), d_y, row_num*sizeof(float), cudaMemcpyDeviceToHost));
// cusparse spmv
//--------------------------------------------------------------------------
// CUSPARSE APIs
float alpha = 1.0f;
float beta = 0.0f;
cusparseHandle_t handle = NULL;
cusparseSpMatDescr_t matA;
cusparseDnVecDescr_t vecX, vecY;
void* dBuffer = NULL;
size_t bufferSize = 0;
CHECK_CUSPARSE( cusparseCreate(&handle) )
// Create sparse matrix A in CSR format
CHECK_CUSPARSE( cusparseCreateCsr(&matA, row_num, col_num, nnz_num,
d_A_row_offset, d_A_col_index, d_A_value,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F) )
// Create dense vector X
CHECK_CUSPARSE( cusparseCreateDnVec(&vecX, col_num, d_x, CUDA_R_32F) )
// Create dense vector y
CHECK_CUSPARSE( cusparseCreateDnVec(&vecY, row_num, d_y_cusparse, CUDA_R_32F) )
// allocate an external buffer if needed
CHECK_CUSPARSE( cusparseSpMV_bufferSize(
handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, CUDA_R_32F,
CUSPARSE_MV_ALG_DEFAULT, &bufferSize) )
CHECK_CUDA( cudaMalloc(&dBuffer, bufferSize) )
// execute SpMV
for(int i=0; i<iter; i++){
CHECK_CUSPARSE( cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, CUDA_R_32F,
CUSPARSE_MV_ALG_DEFAULT, dBuffer) )
}
// destroy matrix/vector descriptors
CHECK_CUSPARSE( cusparseDestroySpMat(matA) )
CHECK_CUSPARSE( cusparseDestroyDnVec(vecX) )
CHECK_CUSPARSE( cusparseDestroyDnVec(vecY) )
CHECK_CUSPARSE( cusparseDestroy(handle) )
//--------------------------------------------------------------------------
// device result check
CHECK_CUDA( cudaMemcpy(y_cusparse_res.data(), d_y_cusparse, row_num * sizeof(float),
cudaMemcpyDeviceToHost) )
bool check_result = true;
for(int i=0; i<row_num; i++){
if(fabs(y[i]-y_cusparse_res[i])>1e-3){
std::cout<<"The result is error!"<<std::endl;
printf("The row is: %d the y is:%f and the cusparse_y is:%f\n", i, y[i], y_cusparse_res[i]);
check_result = false;
break;
}
}
if(check_result){
std::cout<<"The result is right!"<<std::endl;
}
// Free Memory
cudaFree(d_A_row_offset);
cudaFree(d_A_col_index);
cudaFree(d_A_value);
cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_y_cusparse);
    cudaFree(dBuffer);
return 0;
}
|
ComplexHalfGemv.cu | #include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <math.h>
#include <cuComplex.h>
#include <thrust/complex.h>
#include "cuHalfComplex.cuh"
#define checkCudaErrors(func) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
printf ("%s %d CUDA: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
}
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
template <unsigned int WarpSize>
__device__ __forceinline__ half warpReduceSum(half sum) {
if (WarpSize >= 32)sum += __shfl_down_sync(0xffffffff, sum, 16); // 0-16, 1-17, 2-18, etc.
if (WarpSize >= 16)sum += __shfl_down_sync(0xffffffff, sum, 8);// 0-8, 1-9, 2-10, etc.
if (WarpSize >= 8)sum += __shfl_down_sync(0xffffffff, sum, 4);// 0-4, 1-5, 2-6, etc.
if (WarpSize >= 4)sum += __shfl_down_sync(0xffffffff, sum, 2);// 0-2, 1-3, 4-6, 5-7, etc.
if (WarpSize >= 2)sum += __shfl_down_sync(0xffffffff, sum, 1);// 0-1, 2-3, 4-5, etc.
return sum;
}
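// One warp per row of A: each float4 load brings in four cuHalfComplex values
// (4 bytes apiece), the complex multiply-accumulate runs in half precision,
// and the real and imaginary parts are reduced separately across the warp
// before lane 0 writes the result to y.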
__global__ void ComplexHalfGemv(
cuHalfComplex * __restrict__ A,
cuHalfComplex * __restrict__ x,
cuHalfComplex * __restrict__ y,
const int M,
const int N) {
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
const int warp_size=32;
int laneId= tx % warp_size;
int current_row = blockDim.y * bx + ty;
if(current_row < M){
cuHalfComplex res = cuHalfComplex(0,0);
int kIteration = (N/warp_size)/4;
if(kIteration==0) kIteration=1;
A = &A[current_row*N];
#pragma unroll
for(int i=0; i< kIteration; i++){
int current_col_vec = (i*warp_size + laneId)/4;
float4 current_val= reinterpret_cast<float4 *>(A)[current_col_vec]; //FETCH_FLOAT4(A[current_col_vec]);
float4 current_x = reinterpret_cast<float4 *>(x)[current_col_vec];
cuHalfComplex val0 = reinterpret_cast<cuHalfComplex *>(¤t_val)[0];
cuHalfComplex val1 = reinterpret_cast<cuHalfComplex *>(¤t_val)[1];
cuHalfComplex val2 = reinterpret_cast<cuHalfComplex *>(¤t_val)[2];
cuHalfComplex val3 = reinterpret_cast<cuHalfComplex *>(¤t_val)[3];
cuHalfComplex x0 = reinterpret_cast<cuHalfComplex *>(¤t_x)[0];
cuHalfComplex x1 = reinterpret_cast<cuHalfComplex *>(¤t_x)[1];
cuHalfComplex x2 = reinterpret_cast<cuHalfComplex *>(¤t_x)[2];
cuHalfComplex x3 = reinterpret_cast<cuHalfComplex *>(¤t_x)[3];
res = res + val0 * x0;
res = res + val1 * x1;
res = res + val2 * x2;
res = res + val3 * x3;
}
half res_r = res.r;
half res_i = res.i;
res_r = warpReduceSum<warp_size>(res_r);
res_i = warpReduceSum<warp_size>(res_i);
if(laneId == 0) y[current_row]=cuHalfComplex(res_r,res_i);
}
}
int main(int argc, char** argv) {
if (argc != 3) {
printf("usage: ./main [M] [N]\n");
exit(0);
}
size_t M = atoi(argv[1]);
size_t N = atoi(argv[2]);
size_t bytes_A = sizeof(cuHalfComplex) * M * N;
size_t bytes_x = sizeof(cuHalfComplex) * N;
size_t bytes_y = sizeof(cuHalfComplex) * M;
size_t bytes_y1 = sizeof(float2) * M;
cuHalfComplex* h_A = (cuHalfComplex*)malloc(bytes_A);
cuHalfComplex* h_x = (cuHalfComplex*)malloc(bytes_x);
cuHalfComplex* h_y = (cuHalfComplex*)malloc(bytes_y);
float2* h_y1 = (float2*)malloc(bytes_y1);
cuHalfComplex* d_A;
cuHalfComplex* d_x;
cuHalfComplex* d_y;
checkCudaErrors(cudaMalloc((void**)&d_A, bytes_A));
checkCudaErrors(cudaMalloc((void**)&d_x, bytes_x));
checkCudaErrors(cudaMalloc((void**)&d_y, bytes_y));
    // generate data for matrix A
for( int i = 0; i < M * N; i++ ) {
half x = 1;
half y = 1;
h_A[i] = cuHalfComplex(x,y);
}
    // generate data for vector x
for( int i = 0; i < N; i++ ) {
half x = 1;
half y = 1;
h_x[i] = cuHalfComplex(x,y);
}
memset(h_y, 0, M * sizeof(cuHalfComplex));
memset(h_y1, 0, M * sizeof(float2));
int nIter = 1000;
checkCudaErrors(cudaMemcpy( d_A, h_A, bytes_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_x, h_x, bytes_x, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_y, h_y, bytes_y, cudaMemcpyHostToDevice));
for (int run = 0 ; run < nIter; run ++ ) {
dim3 dimGrid(M/4);
dim3 dimBlock(32,4);
ComplexHalfGemv<<< dimGrid, dimBlock >>>(d_A, d_x, d_y, M, N);
}
checkCudaErrors(cudaMemcpy( h_y, d_y, bytes_y, cudaMemcpyDeviceToHost));
// compute the result in cpu
    // fp16 is not supported on the CPU, so use float for the reference result
for(int i=0; i<M; i++){
float result_r = 0;
float result_i = 0;
for(int j=0; j<N; j++){
float a_r = h_A[i*N+j].r;
float a_i = h_A[i*N+j].i;
float b_r = h_x[j].r;
float b_i = h_x[j].i;
float res_r = a_r*b_r - a_i*b_i;
float res_i = a_i*b_r + a_r*b_i;
result_r += res_r;
result_i += res_i;
}
float2 result;
result.x = result_r;
result.y = result_i;
h_y1[i] = result;
}
    // simple absolute-error check; not a rigorous accuracy comparison
double eps = 1.e-3;
bool correct = true;
for (int i = 0; i < M; i++) {
double abs_err = fabs((float)(h_y[i].r) - h_y1[i].x)+fabs((float)(h_y[i].i) - h_y1[i].y);
if (abs_err > eps) {
printf("Error! Matrix[%05d]=(%.8f,%.8f), ref=(%.8f,%.8f) error term is > %E\n",
i, (float)(h_y[i].r), (float)(h_y[i].i), (h_y1[i].x), (h_y1[i].y), eps);
correct = false;
break;
}
}
printf("%s\n", correct ? "Result= PASS" : "Result= FAIL");
// Free Memory
cudaFree(d_A);
cudaFree(d_x);
cudaFree(d_y);
free(h_A);
free(h_x);
free(h_y);
free(h_y1);
}
|
reduce_v3_add_during_load.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
// dim3 Grid( N/(2*THREAD_PER_BLOCK),1);
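// reduce3: each thread adds two input elements while loading from global
// memory, so one block covers 2 * THREAD_PER_BLOCK inputs and the grid is
// half the size of the one-element-per-thread version.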
__global__ void reduce3(float *d_in,float *d_out){
__shared__ float sdata[THREAD_PER_BLOCK];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
sdata[tid] = d_in[i] + d_in[i + blockDim.x];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) d_out[blockIdx.x] = sdata[0];
}
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *d_a;
cudaMalloc((void **)&d_a,N*sizeof(float));
int NUM_PER_BLOCK = 2*THREAD_PER_BLOCK;
int block_num = N / NUM_PER_BLOCK;
float *out=(float *)malloc(block_num*sizeof(float));
float *d_out;
cudaMalloc((void **)&d_out,block_num*sizeof(float));
float *res=(float *)malloc(block_num*sizeof(float));
for(int i=0;i<N;i++){
a[i]=1;
}
for(int i=0;i<block_num;i++){
float cur=0;
for(int j=0;j<NUM_PER_BLOCK;j++){
cur+=a[i * NUM_PER_BLOCK + j];
}
res[i]=cur;
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( block_num, 1);
dim3 Block( THREAD_PER_BLOCK, 1);
reduce3<<<Grid,Block>>>(d_a,d_out);
cudaMemcpy(out,d_out,block_num*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,block_num))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<block_num;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_out);
}
|
spmm.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cusparse.h>
#include "sputnik/sputnik.h"
using namespace std;
#define checkCudaErrors(func) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
printf ("%s %d CUDA: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
}
#define CHECK_CUDA(func) \
{ \
cudaError_t status = (func); \
if (status != cudaSuccess) { \
printf("CUDA API failed at line %d with error: %s (%d)\n", \
__LINE__, cudaGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
#define CHECK_CUSPARSE(func) \
{ \
cusparseStatus_t status = (func); \
if (status != CUSPARSE_STATUS_SUCCESS) { \
printf("CUSPARSE API failed at line %d with error: %s (%d)\n", \
__LINE__, cusparseGetErrorString(status), status); \
return EXIT_FAILURE; \
} \
}
void ReadFile(std::string &file, int &row_num, int &col_num, int &nnz_num,
std::vector<int> &A_row_offset, std::vector<int> &A_col_index,
std::vector<float> &A_value, std::vector<float> &B,
std::vector<float> &C)
{
std::ifstream input;
input.open("matrix/" + file + ".smtx");
while (input.peek() == '%')
input.ignore(2048, '\n');
std::string s;
getline(input, s);
// parse the first line
std::stringstream s_stream(s);
std::string current_str;
getline(s_stream, current_str, ',');
row_num = atoi(current_str.c_str());
getline(s_stream, current_str, ',');
col_num = atoi(current_str.c_str());
getline(s_stream, current_str, ',');
nnz_num = atoi(current_str.c_str());
A_row_offset.resize(row_num+1);
A_col_index.resize(nnz_num);
A_value.resize(nnz_num);
for(int i=0; i<row_num+1; i++){
input >> A_row_offset[i];
}
for(int i=0; i<nnz_num; i++){
input >> A_col_index[i];
}
input.close();
B.resize(col_num * row_num);
C.resize(row_num * row_num);
// init A
for(int i=0; i<A_value.size(); i++){
A_value[i]=i%17;
}
// init B
for(int i=0; i<B.size(); i++){
B[i]=i%13;
}
}
template<typename T>
void vec_print(std::vector<T> array){
for(auto x: array){
cout<<x<<" ";
}
cout<<std::endl;
}
template <typename IndexType, typename ValueType>
void spmm_cpu_kernel(std::vector<IndexType> &row_offset,
std::vector<IndexType> &col_index,
std::vector<ValueType> &value,
std::vector<ValueType> &B,
std::vector<ValueType> &C,
IndexType row_num,
IndexType col_num)
{
for(int i=0; i<row_num; i++){
for(int j=0; j<row_num; j++){
ValueType res = 0;
IndexType num = row_offset[i+1] - row_offset[i];
for(int k=0; k<num; k++){
IndexType index = row_offset[i] + k;
IndexType current_col = col_index[index];
res += value[index]* B[current_col*row_num + j];
}
C[i*row_num+j] = res;
}
}
}
// dim3 dimBlock(THREAD_NUM_PER_BLOCK);
// dim3 dimGrid(row_num/THREAD_NUM_PER_BLOCK, row_num);
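// v0: one thread per element of C. blockIdx.y selects the output row and
// blockIdx.x * THREAD_NUM_PER_BLOCK + threadIdx.x the output column; each
// thread walks the nonzeros of its row once, and neighbouring threads read
// consecutive elements of B.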
template <unsigned int THREAD_NUM_PER_BLOCK>
__global__ void My_spmm_csr_vector_kernel_v0(const int num_rows,
const int * A_row_offset,
const int * A_col_index,
const float * A_value,
const float * B,
float * C,
const int ldb,
const int ldc){
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
// matrix C row_index
int C_row_index = by;
int C_col_index = bx * THREAD_NUM_PER_BLOCK + tx;
if(C_row_index < num_rows && C_col_index < ldc){
int row_start = A_row_offset[C_row_index];
int row_end = A_row_offset[C_row_index + 1];
int iter_num = row_end - row_start;
float sum = 0.0;
for(int i=0; i<iter_num; i++){
int index = row_start + i;
int current_col = A_col_index[index];
float current_val = A_value[index];
float reg_B = B[ current_col * ldb + C_col_index];
sum += current_val * reg_B;
}
C[C_row_index * ldc + C_col_index] = sum;
}
}
// dim3 dimBlock(THREAD_NUM_PER_BLOCK);
// dim3 dimGrid(row_num/THREAD_NUM_PER_BLOCK, row_num);
// v1: stages the current row's nonzeros of A in shared memory; in practice this optimization brings little benefit over v0
template <
const int BLOCK_SIZE_X,
const int BLOCK_SIZE_K,
const int THREAD_NUM_PER_BLOCK
>
__global__ void My_spmm_csr_vector_kernel_v1(const int num_rows,
const int * A_row_offset,
const int * A_col_index,
const float * A_value,
const float * B,
float * C,
const int M,
const int N,
const int K){
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
// matrix C row_index
int C_row_index = by;
int C_col_index = bx * THREAD_NUM_PER_BLOCK + tx;
// shared mem for A
__shared__ int As_col[BLOCK_SIZE_K];
__shared__ float As_value[BLOCK_SIZE_K];
int NUM_A_PER_THREAD = BLOCK_SIZE_K/THREAD_NUM_PER_BLOCK;
if(C_row_index < num_rows && C_col_index < N){
int row_start = A_row_offset[C_row_index];
int row_end = A_row_offset[C_row_index + 1];
int iter_num = row_end - row_start;
float sum = 0.0;
for(int k=0; k<iter_num; k+=BLOCK_SIZE_K){
// store A to shared mem
int global_index = row_start + k*BLOCK_SIZE_K;
int local_index = NUM_A_PER_THREAD * tx;
for(int i=0; i< NUM_A_PER_THREAD; i++){
if(global_index + local_index + i < row_end){
As_col[local_index + i] = A_col_index[global_index + local_index +i];
As_value[local_index + i] = A_value[global_index + local_index +i];
}
else{
As_col[local_index + i] = -1;
As_value[local_index + i] = 0.0;
}
}
__syncthreads();
// load A from shared mem
for(int i=0; i< BLOCK_SIZE_K; i++){
int current_col = As_col[i];
float current_val = As_value[i];
if(current_col != -1){
float reg_B = B[ current_col * N + C_col_index];
sum += current_val * reg_B;
}
}
}
C[C_row_index * N + C_col_index] = sum;
}
}
// A(row_num,col_num)
// B(col_num,row_num)
// C(row_num,row_num)
int main(int argc, char **argv)
{
if (argc != 3) {
printf("usage: ./spmm -f [matrix]\n");
exit(0);
}
string file;
for (int i = 1; i < argc; i++)
{
if (strcmp(argv[i], "-f") == 0)
{
file = argv[i + 1];
}
}
// load csr data from .smtx file
int row_num = 0;
int col_num = 0;
int nnz_num = 0;
std::vector<int> A_row_offset;
std::vector<int> A_col_index;
std::vector<float> A_value;
std::vector<float> B;
std::vector<float> C;
ReadFile(file, row_num, col_num, nnz_num, A_row_offset, A_col_index, A_value, B, C);
std::vector<float> C_cusparse(C.size());
    // used by sputnik
    // TODO: check whether this is actually needed
std::vector<int> row_indices(row_num);
// init row_indices
for(int i=0; i<row_num; i++){
row_indices[i] = A_row_offset[i+1] - A_row_offset[i];
}
//debug case
/*
int row_num = 4;
int col_num = 4;
int nnz_num = 9;
int hA_csrOffsets[] = { 0, 3, 4, 7, 9 };
int hA_columns[] = { 0, 2, 3, 1, 0, 2, 3, 1, 3 };
float hA_values[] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
6.0f, 7.0f, 8.0f, 9.0f };
float hB[] = { 1.0f, 5.0f, 9.0f, 0.0f,
2.0f, 6.0f, 10.0f, 0.0f,
3.0f, 7.0f, 11.0f, 0.0f,
4.0f, 8.0f, 12.0f, 0.0f};
std::vector<int> A_row_offset(hA_csrOffsets, hA_csrOffsets + sizeof(hA_csrOffsets));
std::vector<int> A_col_index(hA_columns, hA_columns + sizeof(hA_columns));
std::vector<float> A_value(hA_values, hA_values + sizeof(hA_values));
std::vector<float> B(hB, hB + sizeof(hB));
std::vector<float> C(16, 0);
std::vector<float> C_cusparse(16, 0);
*/
// check input
std::cout<<"The row_num is:" <<row_num <<std::endl;
std::cout<<"The col_num is:" <<col_num <<std::endl;
std::cout<<"The nnz_num is:" <<nnz_num <<std::endl;
// allocate memory in GPU device
int* d_A_row_offset;
int* d_A_col_index;
float* d_A_value;
float* d_B;
float* d_C;
float* d_C_cusparse;
int* d_row_indices;
int B_num = B.size();
int C_num = C.size();
checkCudaErrors(cudaMalloc(&d_A_row_offset, (row_num + 1)*sizeof(int)));
checkCudaErrors(cudaMalloc(&d_A_col_index, nnz_num*sizeof(int)));
checkCudaErrors(cudaMalloc(&d_A_value, nnz_num*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_B, B_num*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_C, C_num*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_C_cusparse, C_num*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_row_indices, row_num*sizeof(int)));
checkCudaErrors(cudaMemcpy( d_A_row_offset, A_row_offset.data(), (row_num + 1)*sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_A_col_index, A_col_index.data(), nnz_num*sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_A_value, A_value.data(), nnz_num*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_B, B.data(), B_num*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_row_indices, row_indices.data(), row_num*sizeof(int), cudaMemcpyHostToDevice));
int iter = 2000;
// My spmm
// cpu version
// spmm_cpu_kernel<int,float>(A_row_offset, A_col_index, A_value, B, C, row_num, col_num);
constexpr unsigned int THREAD_NUM_PER_BLOCK = 128;
dim3 dimBlock(THREAD_NUM_PER_BLOCK);
dim3 dimGrid(row_num/THREAD_NUM_PER_BLOCK, row_num);
for(int i=0; i<iter; i++){
        My_spmm_csr_vector_kernel_v1<128, 512, THREAD_NUM_PER_BLOCK> <<< dimGrid, dimBlock >>>
(row_num, d_A_row_offset, d_A_col_index, d_A_value, d_B, d_C, row_num, row_num, col_num);
}
//checkCudaErrors(cudaMemcpy(C.data(), d_C, C_num*sizeof(float), cudaMemcpyDeviceToHost));
// sputnik
cudaStream_t s0 = 0;
for(int i=0; i<iter; i++){
sputnik::CudaSpmm(row_num, row_num, col_num,
nnz_num, d_row_indices,
d_A_value, d_A_row_offset, d_A_col_index,
d_B, d_C, s0);
}
cudaStreamSynchronize(s0);
checkCudaErrors(cudaMemcpy(C.data(), d_C, C_num * sizeof(float), cudaMemcpyDeviceToHost));
// cusparse spmm
//--------------------------------------------------------------------------
// CUSPARSE APIs
int ldb = row_num;
int ldc = row_num;
float alpha = 1.0f;
float beta = 0.0f;
cusparseHandle_t handle = NULL;
cusparseSpMatDescr_t matA;
cusparseDnMatDescr_t matB, matC;
void* dBuffer = NULL;
size_t bufferSize = 0;
CHECK_CUSPARSE( cusparseCreate(&handle) )
// Create sparse matrix A in CSR format
CHECK_CUSPARSE( cusparseCreateCsr(&matA, row_num, col_num, nnz_num,
d_A_row_offset, d_A_col_index, d_A_value,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F) )
// Create dense matrix B
CHECK_CUSPARSE( cusparseCreateDnMat(&matB, col_num, row_num, ldb, d_B,
CUDA_R_32F, CUSPARSE_ORDER_ROW) )
// Create dense matrix C
CHECK_CUSPARSE( cusparseCreateDnMat(&matC, row_num, row_num, ldc, d_C_cusparse,
CUDA_R_32F, CUSPARSE_ORDER_ROW) )
// allocate an external buffer if needed
CHECK_CUSPARSE( cusparseSpMM_bufferSize(
handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, matB, &beta, matC, CUDA_R_32F,
CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize) )
CHECK_CUDA( cudaMalloc(&dBuffer, bufferSize) )
// execute SpMM
for(int i=0; i<iter; i++){
CHECK_CUSPARSE( cusparseSpMM(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, matB, &beta, matC, CUDA_R_32F,
CUSPARSE_SPMM_ALG_DEFAULT, dBuffer) )
}
// destroy matrix/vector descriptors
CHECK_CUSPARSE( cusparseDestroySpMat(matA) )
CHECK_CUSPARSE( cusparseDestroyDnMat(matB) )
CHECK_CUSPARSE( cusparseDestroyDnMat(matC) )
CHECK_CUSPARSE( cusparseDestroy(handle) )
//--------------------------------------------------------------------------
// device result check
CHECK_CUDA( cudaMemcpy(C_cusparse.data(), d_C_cusparse, C_num * sizeof(float),
cudaMemcpyDeviceToHost) )
bool check_result = true;
for(int i=0; i<C.size(); i++){
if(fabs(C[i]-C_cusparse[i])>1e-6){
std::cout<<"The result is error!"<<std::endl;
printf("The error case is (%d %d %f %f)\n", i/row_num, i%row_num, C[i], C_cusparse[i]);
check_result = false;
break;
}
}
if(check_result){
std::cout<<"The result is right!"<<std::endl;
}
// Free Memory
cudaFree(d_A_row_offset);
cudaFree(d_A_col_index);
cudaFree(d_A_value);
cudaFree(d_B);
cudaFree(d_C);
    cudaFree(d_C_cusparse);
    cudaFree(d_row_indices);
    cudaFree(dBuffer);
return 0;
}
 |
reduce_v1_no_divergence_branch.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
// no divergent branches, but the strided indexing (index = 2 * s * tid) introduces shared-memory bank conflicts
__global__ void reduce1(float *d_in,float *d_out){
__shared__ float sdata[THREAD_PER_BLOCK];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = d_in[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) d_out[blockIdx.x] = sdata[0];
}
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *d_a;
cudaMalloc((void **)&d_a,N*sizeof(float));
int block_num=N/THREAD_PER_BLOCK;
float *out=(float *)malloc((N/THREAD_PER_BLOCK)*sizeof(float));
float *d_out;
cudaMalloc((void **)&d_out,(N/THREAD_PER_BLOCK)*sizeof(float));
float *res=(float *)malloc((N/THREAD_PER_BLOCK)*sizeof(float));
for(int i=0;i<N;i++){
a[i]=1;
}
for(int i=0;i<block_num;i++){
float cur=0;
for(int j=0;j<THREAD_PER_BLOCK;j++){
cur+=a[i*THREAD_PER_BLOCK+j];
}
res[i]=cur;
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( N/THREAD_PER_BLOCK,1);
dim3 Block( THREAD_PER_BLOCK,1);
reduce1<<<Grid,Block>>>(d_a,d_out);
cudaMemcpy(out,d_out,block_num*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,block_num))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<block_num;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_out);
}
|
reduce_v5_completely_unroll.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
template <unsigned int blockSize>
__device__ void warpReduce(volatile float* cache, unsigned int tid){
if (blockSize >= 64)cache[tid]+=cache[tid+32];
if (blockSize >= 32)cache[tid]+=cache[tid+16];
if (blockSize >= 16)cache[tid]+=cache[tid+8];
if (blockSize >= 8)cache[tid]+=cache[tid+4];
if (blockSize >= 4)cache[tid]+=cache[tid+2];
if (blockSize >= 2)cache[tid]+=cache[tid+1];
}
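// reduce5: the shared-memory reduction is fully unrolled using the
// compile-time blockSize template parameter, so the size checks are resolved
// at compile time and only the required additions and barriers remain; the
// last 32 threads finish in warpReduce without synchronization.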
template <unsigned int blockSize>
__global__ void reduce5(float *d_in,float *d_out){
__shared__ float sdata[THREAD_PER_BLOCK];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
sdata[tid] = d_in[i] + d_in[i + blockDim.x];
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) {
sdata[tid] += sdata[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
sdata[tid] += sdata[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
sdata[tid] += sdata[tid + 64];
}
__syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) d_out[blockIdx.x] = sdata[0];
}
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *d_a;
cudaMalloc((void **)&d_a,N*sizeof(float));
int NUM_PER_BLOCK = 2*THREAD_PER_BLOCK;
int block_num = N / NUM_PER_BLOCK;
float *out=(float *)malloc(block_num*sizeof(float));
float *d_out;
cudaMalloc((void **)&d_out,block_num*sizeof(float));
float *res=(float *)malloc(block_num*sizeof(float));
for(int i=0;i<N;i++){
a[i]=1;
}
for(int i=0;i<block_num;i++){
float cur=0;
for(int j=0;j<NUM_PER_BLOCK;j++){
cur+=a[i * NUM_PER_BLOCK + j];
}
res[i]=cur;
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( block_num, 1);
dim3 Block( THREAD_PER_BLOCK, 1);
reduce5<THREAD_PER_BLOCK><<<Grid,Block>>>(d_a,d_out);
cudaMemcpy(out,d_out,block_num*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,block_num))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<block_num;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_out);
}
|
reduce_v2_no_bank_conflict.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
// sequential addressing avoids bank conflicts, but at every step at least half of the threads in the block are idle
__global__ void reduce2(float *d_in,float *d_out){
__shared__ float sdata[THREAD_PER_BLOCK];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = d_in[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) d_out[blockIdx.x] = sdata[0];
}
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *d_a;
cudaMalloc((void **)&d_a,N*sizeof(float));
int block_num=N/THREAD_PER_BLOCK;
float *out=(float *)malloc((N/THREAD_PER_BLOCK)*sizeof(float));
float *d_out;
cudaMalloc((void **)&d_out,(N/THREAD_PER_BLOCK)*sizeof(float));
float *res=(float *)malloc((N/THREAD_PER_BLOCK)*sizeof(float));
for(int i=0;i<N;i++){
a[i]=1;
}
for(int i=0;i<block_num;i++){
float cur=0;
for(int j=0;j<THREAD_PER_BLOCK;j++){
cur+=a[i*THREAD_PER_BLOCK+j];
}
res[i]=cur;
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( N/THREAD_PER_BLOCK,1);
dim3 Block( THREAD_PER_BLOCK,1);
reduce2<<<Grid,Block>>>(d_a,d_out);
cudaMemcpy(out,d_out,block_num*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,block_num))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<block_num;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_out);
}
|
sgemm_v1.cu | // optimize sgemm
#include <stdio.h>
#include <stdlib.h>
#include "assert.h"
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
// cal offset from row col and ld , in row-major matrix, ld is the width of the matrix
#define OFFSET(row, col, ld) ((row) * (ld) + (col))
// transfer float4
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
#define checkCudaErrors(func) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
printf ("%s %d CUDA: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
}
// K: ldA
// N: ldB
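// Tiled SGEMM with double buffering: each block computes a BLOCK_SIZE_M x
// BLOCK_SIZE_N tile of C, streaming BLOCK_SIZE_K-wide slices of A and B
// through two shared-memory buffers so the global loads for the next slice
// overlap with the math on the current one; each thread keeps a
// THREAD_SIZE_Y x THREAD_SIZE_X sub-tile of C in registers and uses float4
// loads/stores throughout.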
template <
const int BLOCK_SIZE_M, // height of block of C that each thread block calculate
const int BLOCK_SIZE_K, // width of block of A that each thread block load into shared memory
const int BLOCK_SIZE_N, // width of block of C that each thread block calculate
const int THREAD_SIZE_Y, // height of block of C that each thread calculate
const int THREAD_SIZE_X, // width of block of C that each thread calculate
const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not
>
__global__ void Sgemm(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
const int M,
const int N,
const int K) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// the threads number in Block of X,Y
const int THREAD_X_PER_BLOCK = BLOCK_SIZE_N / THREAD_SIZE_X;
const int THREAD_Y_PER_BLOCK = BLOCK_SIZE_M / THREAD_SIZE_Y;
const int THREAD_NUM_PER_BLOCK = THREAD_X_PER_BLOCK * THREAD_Y_PER_BLOCK;
// thread id in cur Block
const int tid = ty * THREAD_X_PER_BLOCK + tx;
// shared memory
__shared__ float As[2][BLOCK_SIZE_K][BLOCK_SIZE_M];
__shared__ float Bs[2][BLOCK_SIZE_K][BLOCK_SIZE_N];
// registers for C
float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0};
// registers for A and B
float frag_a[2][THREAD_SIZE_Y];
float frag_b[2][THREAD_SIZE_X];
// registers load global memory
const int ldg_num_a = BLOCK_SIZE_M * BLOCK_SIZE_K / (THREAD_NUM_PER_BLOCK * 4);
const int ldg_num_b = BLOCK_SIZE_K * BLOCK_SIZE_N / (THREAD_NUM_PER_BLOCK * 4);
float ldg_a_reg[4*ldg_num_a];
float ldg_b_reg[4*ldg_num_b];
// threads number in one row
const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4;
const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4;
// row number and col number that needs to be loaded by this thread
const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW;
const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW;
const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4;
const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4;
// row stride that thread uses to load multiple rows of a tile
const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW;
const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW;
A = &A[(BLOCK_SIZE_M * by)* K];
B = &B[BLOCK_SIZE_N * bx];
//transfer first tile from global mem to shared mem
// load A from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
int ldg_index = i / A_TILE_ROW_STRIDE * 4;
FETCH_FLOAT4(ldg_a_reg[ldg_index]) = FETCH_FLOAT4(A[OFFSET(
A_TILE_ROW_START + i, // row
A_TILE_COL, // col
K )]);
As[0][A_TILE_COL][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index];
As[0][A_TILE_COL+1][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+1];
As[0][A_TILE_COL+2][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+2];
As[0][A_TILE_COL+3][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+3];
}
// load B from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
FETCH_FLOAT4(Bs[0][B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET(
B_TILE_ROW_START + i, // row
B_TILE_COL, // col
N )]);
}
__syncthreads();
// load A from shared memory to register
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; thread_y += 4) {
FETCH_FLOAT4(frag_a[0][thread_y]) = FETCH_FLOAT4(As[0][0][THREAD_SIZE_Y * ty + thread_y]);
}
// load B from shared memory to register
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) {
FETCH_FLOAT4(frag_b[0][thread_x]) = FETCH_FLOAT4(Bs[0][0][THREAD_SIZE_X * tx + thread_x]);
}
int write_stage_idx = 1;
int tile_idx = 0;
do{
tile_idx += BLOCK_SIZE_K;
// load next tile from global mem
if(tile_idx< K){
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
int ldg_index = i / A_TILE_ROW_STRIDE * 4;
FETCH_FLOAT4(ldg_a_reg[ldg_index]) = FETCH_FLOAT4(A[OFFSET(
A_TILE_ROW_START + i, // row
A_TILE_COL + tile_idx, // col
K )]);
}
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
int ldg_index = i / B_TILE_ROW_STRIDE * 4;
FETCH_FLOAT4(ldg_b_reg[ldg_index]) = FETCH_FLOAT4(B[OFFSET(
tile_idx + B_TILE_ROW_START + i, // row
B_TILE_COL, // col
N )]);
}
}
int load_stage_idx = write_stage_idx ^ 1;
#pragma unroll
for(int j=0; j<BLOCK_SIZE_K-1; ++j){
// load next tile from shared mem to register
// load A from shared memory to register
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; thread_y += 4) {
FETCH_FLOAT4(frag_a[(j+1)%2][thread_y]) = FETCH_FLOAT4(As[load_stage_idx][j+1][THREAD_SIZE_Y * ty + thread_y]);
}
// load B from shared memory to register
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) {
FETCH_FLOAT4(frag_b[(j+1)%2][thread_x]) = FETCH_FLOAT4(Bs[load_stage_idx][j+1][THREAD_SIZE_X * tx + thread_x]);
}
// compute C THREAD_SIZE_X x THREAD_SIZE_Y
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
accum[thread_y][thread_x] += frag_a[j%2][thread_y] * frag_b[j%2][thread_x];
}
}
}
if(tile_idx < K){
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) {
int ldg_index = i / A_TILE_ROW_STRIDE * 4;
As[write_stage_idx][A_TILE_COL][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index];
As[write_stage_idx][A_TILE_COL+1][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+1];
As[write_stage_idx][A_TILE_COL+2][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+2];
As[write_stage_idx][A_TILE_COL+3][A_TILE_ROW_START + i]=ldg_a_reg[ldg_index+3];
}
// load B from global memory to shared memory
#pragma unroll
for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) {
int ldg_index = i / B_TILE_ROW_STRIDE * 4;
FETCH_FLOAT4(Bs[write_stage_idx][B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(ldg_b_reg[ldg_index]);
}
// use double buffer, only need one sync
__syncthreads();
// switch
write_stage_idx ^= 1;
}
// load first tile from shared mem to register of next iter
// load A from shared memory to register
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; thread_y += 4) {
FETCH_FLOAT4(frag_a[0][thread_y]) = FETCH_FLOAT4(As[load_stage_idx^1][0][THREAD_SIZE_Y * ty + thread_y]);
}
// load B from shared memory to register
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) {
FETCH_FLOAT4(frag_b[0][thread_x]) = FETCH_FLOAT4(Bs[load_stage_idx^1][0][THREAD_SIZE_X * tx + thread_x]);
}
//compute last tile mma THREAD_SIZE_X x THREAD_SIZE_Y
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) {
accum[thread_y][thread_x] += frag_a[1][thread_y] * frag_b[1][thread_x];
}
}
}while(tile_idx< K);
// store back to C
#pragma unroll
for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) {
#pragma unroll
for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x+=4) {
FETCH_FLOAT4(C[OFFSET(
BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y,
BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x,
N)]) = FETCH_FLOAT4(accum[thread_y][thread_x]);
}
}
}
int main(int argc, char** argv) {
if (argc != 4) {
printf("usage: ./main [M] [K] [N]\n");
exit(0);
}
size_t M = atoi(argv[1]);
size_t K = atoi(argv[2]);
size_t N = atoi(argv[3]);
assert( M%8 == 0);
assert( N%8 == 0);
assert( K%8 == 0);
size_t bytes_A = sizeof(float) * M * K;
size_t bytes_B = sizeof(float) * K * N;
size_t bytes_C = sizeof(float) * M * N;
float* h_A = (float*)malloc(bytes_A);
float* h_B = (float*)malloc(bytes_B);
float* h_C = (float*)malloc(bytes_C);
float* h_C1 = (float*)malloc(bytes_C);
float* d_A;
float* d_B;
float* d_C;
checkCudaErrors(cudaMalloc(&d_A, bytes_A));
checkCudaErrors(cudaMalloc(&d_B, bytes_B));
checkCudaErrors(cudaMalloc(&d_C, bytes_C));
double msecPerMatrixMul[2] = {0, 0};
double gigaFlops[2] = {0, 0};
double flopsPerMatrixMul = 2.0 * M * N * K;
const int BLOCK_SIZE_M = 128;
const int BLOCK_SIZE_K = 8;
const int BLOCK_SIZE_N = 128;
const int THREAD_SIZE_X = 8;
const int THREAD_SIZE_Y = 8;
const bool ENABLE_DOUBLE_BUFFER = false;
// generate A
for( int i = 0; i < M * K; i++ ){
h_A[i] = i / 13;
}
// generate B
for( int i = 0; i < K * N; i++ ) {
h_B[i] = i % 13;
}
checkCudaErrors(cudaMemcpy( d_A, h_A, bytes_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_B, h_B, bytes_B, cudaMemcpyHostToDevice));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
float msecTotal = 0;
int nIter = 1000;
checkCudaErrors(cudaMemcpy( d_C, h_C, bytes_C, cudaMemcpyHostToDevice));
checkCudaErrors(cudaEventRecord(start));
for (int run = 0 ; run < nIter; run ++ ) {
dim3 dimBlock(BLOCK_SIZE_N / THREAD_SIZE_X, BLOCK_SIZE_M / THREAD_SIZE_Y);
dim3 dimGrid(N / BLOCK_SIZE_N, M / BLOCK_SIZE_M);
Sgemm<BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N, THREAD_SIZE_Y, THREAD_SIZE_X, ENABLE_DOUBLE_BUFFER>
<<< dimGrid, dimBlock >>>(d_A, d_B, d_C, M, N, K);
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(cudaMemcpy( h_C, d_C, bytes_C, cudaMemcpyDeviceToHost));
msecPerMatrixMul[0] = msecTotal / nIter;
gigaFlops[0] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[0] / 1000.0f);
printf( "My gemm Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n",
gigaFlops[0],
msecPerMatrixMul[0],
flopsPerMatrixMul);
// cublas
cublasHandle_t blas_handle;
cublasCreate(&blas_handle);
float alpha = 1.0;
float beta = 0;
checkCudaErrors(cudaMemcpy( d_C, h_C, bytes_C, cudaMemcpyHostToDevice));
checkCudaErrors(cudaEventRecord(start));
for (int run = 0 ; run < nIter; run ++ ) {
cublasSgemm (blas_handle, CUBLAS_OP_T, CUBLAS_OP_T,
M, N, K, &alpha,
d_A, K, d_B, N, &beta, d_C, N
);
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(cudaMemcpy( h_C1, d_C, bytes_C, cudaMemcpyDeviceToHost));
msecPerMatrixMul[1] = msecTotal / nIter;
gigaFlops[1] = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul[1] / 1000.0f);
printf( "CuBlas Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops,\n",
gigaFlops[1],
msecPerMatrixMul[1],
flopsPerMatrixMul);
cublasDestroy(blas_handle);
double eps = 1.e-6; // machine zero
bool correct = true;
for (int i = 0; i < M * N; i++) {
int row = i / N;
int col = i % N;
double abs_err = fabs(h_C[i] - h_C1[col * M + row]);
double dot_length = M;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], h_C1[col * M + row], eps);
correct = false;
break;
}
}
printf("%s\n", correct ? "Result= PASS" : "Result= FAIL");
printf("ratio= %f\n", gigaFlops[0] / gigaFlops[1]);
// Free Memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
free(h_C1);
}
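// Build/run sketch (file name, arch flag, and sizes below are assumptions, not from this listing):
//   nvcc -O3 -arch=sm_70 sgemm.cu -lcublas -o sgemm
//   ./sgemm 2048 2048 2048
// The asserts only require M, N, K to be multiples of 8; with BLOCK_SIZE_M/N = 128 the
// grid math (N/BLOCK_SIZE_N, M/BLOCK_SIZE_M) additionally assumes M and N are multiples of 128.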
|
reduce_v7_shuffle.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
#define WARP_SIZE 32
template <unsigned int blockSize>
__device__ __forceinline__ float warpReduceSum(float sum) {
if (blockSize >= 32)sum += __shfl_down_sync(0xffffffff, sum, 16); // 0-16, 1-17, 2-18, etc.
if (blockSize >= 16)sum += __shfl_down_sync(0xffffffff, sum, 8);// 0-8, 1-9, 2-10, etc.
if (blockSize >= 8)sum += __shfl_down_sync(0xffffffff, sum, 4);// 0-4, 1-5, 2-6, etc.
if (blockSize >= 4)sum += __shfl_down_sync(0xffffffff, sum, 2);// 0-2, 1-3, 4-6, 5-7, etc.
if (blockSize >= 2)sum += __shfl_down_sync(0xffffffff, sum, 1);// 0-1, 2-3, 4-5, etc.
return sum;
}
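// Worked example (a sketch assuming a full warp of 32 active lanes):
//   start:      lane l holds s_l
//   offset 16:  lane 0 <- s_0 + s_16, lane 1 <- s_1 + s_17, ...
//   offset  8:  lane 0 <- (s_0 + s_16) + (s_8 + s_24), ...
//   offset  1:  lane 0 holds the sum of all 32 values; other lanes hold partial sums.
// Only the value returned in lane 0 is meaningful to the caller.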
template <unsigned int blockSize, int NUM_PER_THREAD>
__global__ void reduce7(float *d_in,float *d_out, unsigned int n){
float sum = 0;
    // each thread accumulates NUM_PER_THREAD elements from global memory into a register
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockSize * NUM_PER_THREAD) + threadIdx.x;
#pragma unroll
for(int iter=0; iter<NUM_PER_THREAD; iter++){
sum += d_in[i+iter*blockSize];
}
// Shared mem for partial sums (one per warp in the block)
static __shared__ float warpLevelSums[WARP_SIZE];
const int laneId = threadIdx.x % WARP_SIZE;
const int warpId = threadIdx.x / WARP_SIZE;
sum = warpReduceSum<blockSize>(sum);
if(laneId == 0 )warpLevelSums[warpId] = sum;
__syncthreads();
// read from shared memory only if that warp existed
sum = (threadIdx.x < blockDim.x / WARP_SIZE) ? warpLevelSums[laneId] : 0;
// Final reduce using first warp
if (warpId == 0) sum = warpReduceSum<blockSize/WARP_SIZE>(sum);
// write result for this block to global mem
if (tid == 0) d_out[blockIdx.x] = sum;
}
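// Optional second pass (a sketch, not part of this benchmark): the per-block partials in
// d_out could themselves be reduced on the device with one block of block_num threads,
// assuming block_num <= 1024 and a hypothetical 1-float buffer d_final:
//   reduce7<block_num, 1><<<1, block_num>>>(d_out, d_final, block_num);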
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *d_a;
cudaMalloc((void **)&d_a,N*sizeof(float));
const int block_num = 1024;
const int NUM_PER_BLOCK = N / block_num;
const int NUM_PER_THREAD = NUM_PER_BLOCK/THREAD_PER_BLOCK;
float *out=(float *)malloc(block_num*sizeof(float));
float *d_out;
cudaMalloc((void **)&d_out,block_num*sizeof(float));
float *res=(float *)malloc(block_num*sizeof(float));
for(int i=0;i<N;i++){
a[i]=i%456;
}
for(int i=0;i<block_num;i++){
float cur=0;
for(int j=0;j<NUM_PER_BLOCK;j++){
if(i * NUM_PER_BLOCK + j < N){
cur+=a[i * NUM_PER_BLOCK + j];
}
}
res[i]=cur;
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( block_num, 1);
dim3 Block( THREAD_PER_BLOCK, 1);
int iter = 2000;
for(int i=0; i<iter; i++){
reduce7<THREAD_PER_BLOCK, NUM_PER_THREAD><<<Grid,Block>>>(d_a, d_out, N);
}
cudaMemcpy(out,d_out,block_num*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,block_num))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<block_num;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_out);
}
|
Sgemv_v1.cu | #include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <math.h>
// calculate the offset from row, col and ld; in a row-major matrix, ld is the width (leading dimension)
#define OFFSET(row, col, ld) ((row) * (ld) + (col))
// transfer float4
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
#define checkCudaErrors(func) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
printf ("%s %d CUDA: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
}
template <unsigned int WarpSize>
__device__ __forceinline__ float warpReduceSum(float sum) {
if (WarpSize >= 32)sum += __shfl_down_sync(0xffffffff, sum, 16); // 0-16, 1-17, 2-18, etc.
if (WarpSize >= 16)sum += __shfl_down_sync(0xffffffff, sum, 8);// 0-8, 1-9, 2-10, etc.
if (WarpSize >= 8)sum += __shfl_down_sync(0xffffffff, sum, 4);// 0-4, 1-5, 2-6, etc.
if (WarpSize >= 4)sum += __shfl_down_sync(0xffffffff, sum, 2);// 0-2, 1-3, 4-6, 5-7, etc.
if (WarpSize >= 2)sum += __shfl_down_sync(0xffffffff, sum, 1);// 0-1, 2-3, 4-5, etc.
return sum;
}
// intended for N >= 128; each warp reads its row as float4s, so full coverage assumes N is a multiple of 128 (32 lanes x 4 floats)
__global__ void Sgemv_v1(
float * __restrict__ A,
float * __restrict__ x,
float * __restrict__ y,
const int M,
const int N) {
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
const int warp_size=32;
int laneId= tx % warp_size;
int current_row = blockDim.y * bx + ty;
if(current_row < M){
float res=0;
int kIteration = (N/warp_size)/4;
if(kIteration==0) kIteration=1;
A = &A[current_row*N];
#pragma unroll
for(int i=0; i< kIteration; i++){
int current_col_vec = (i*warp_size + laneId);
float4 current_val= reinterpret_cast<float4 *>(A)[current_col_vec];
float4 current_x = reinterpret_cast<float4 *>(x)[current_col_vec];
res += current_val.x*current_x.x;
res += current_val.y*current_x.y;
res += current_val.z*current_x.z;
res += current_val.w*current_x.w;
}
res = warpReduceSum<warp_size>(res);
if(laneId==0) y[current_row]=res;
}
}
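// Worked example (assuming N = 128): kIteration = (128/32)/4 = 1, so lane l of the row's warp
// reads the float4 at index l, i.e. columns 4*l .. 4*l+3 of A's row and of x; the 32 lanes cover
// all 128 columns in one pass before warpReduceSum folds the partial dot products into lane 0.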
int main(int argc, char** argv) {
if (argc != 3) {
printf("usage: ./main [M] [N]\n");
exit(0);
}
size_t M = atoi(argv[1]);
size_t N = atoi(argv[2]);
size_t bytes_A = sizeof(float) * M * N;
size_t bytes_x = sizeof(float) * N;
size_t bytes_y = sizeof(float) * M;
float* h_A = (float*)malloc(bytes_A);
float* h_x = (float*)malloc(bytes_x);
float* h_y = (float*)malloc(bytes_y);
float* h_y1 = (float*)malloc(bytes_y);
float* d_A;
float* d_x;
float* d_y;
checkCudaErrors(cudaMalloc(&d_A, bytes_A));
checkCudaErrors(cudaMalloc(&d_x, bytes_x));
checkCudaErrors(cudaMalloc(&d_y, bytes_y));
    // generate data for A
for( int i = 0; i < M * N; i++ ) {
h_A[i] = (float)i/N;
}
    // generate data for x
for( int i = 0; i < N; i++ ) {
h_x[i] = 1;
}
memset(h_y,0,M*sizeof(float));
memset(h_y1,0,M*sizeof(float));
int nIter = 1000;
checkCudaErrors(cudaMemcpy( d_A, h_A, bytes_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_x, h_x, bytes_x, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_y, h_y, bytes_y, cudaMemcpyHostToDevice));
for (int run = 0 ; run < nIter; run ++ ) {
dim3 dimGrid(M/4);
dim3 dimBlock(32,4);
Sgemv_v1<<< dimGrid, dimBlock >>>(d_A, d_x, d_y, M, N);
}
checkCudaErrors(cudaMemcpy( h_y, d_y, bytes_y, cudaMemcpyDeviceToHost));
// cublas
cublasHandle_t blas_handle;
cublasCreate(&blas_handle);
float alpha = 1.0;
float beta = 0;
checkCudaErrors(cudaMemcpy( d_y, h_y1, bytes_y, cudaMemcpyHostToDevice));
for (int run = 0 ; run < nIter; run ++ ) {
cublasSgemv (blas_handle, CUBLAS_OP_T,
N, M, &alpha,
d_A, N, d_x, 1, &beta, d_y, 1
);
}
checkCudaErrors(cudaMemcpy( h_y1, d_y, bytes_y, cudaMemcpyDeviceToHost));
cublasDestroy(blas_handle);
double eps = 1.e-6; // machine zero
bool correct = true;
for (int i = 0; i < M; i++) {
double abs_err = fabs(h_y[i] - h_y1[i]);
double dot_length = M;
double abs_val = fabs(h_y[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_y[i], h_y1[i], eps);
correct = false;
break;
}
}
printf("%s\n", correct ? "Result= PASS" : "Result= FAIL");
// Free Memory
cudaFree(d_A);
cudaFree(d_x);
cudaFree(d_y);
free(h_A);
free(h_x);
free(h_y);
free(h_y1);
}
|
reduce_v0_baseline.cu | #include <bits/stdc++.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <sys/time.h>
#define THREAD_PER_BLOCK 256
__global__ void reduce0(float *d_in,float *d_out){
__shared__ float sdata[THREAD_PER_BLOCK];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = d_in[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
if (tid % (2*s) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) d_out[blockIdx.x] = sdata[0];
}
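// Worked example (a sketch for blockDim.x = 8, sdata = {a0..a7}):
//   s=1: threads 0,2,4,6 do sdata[t] += sdata[t+1]  -> pair sums in even slots
//   s=2: threads 0,4     do sdata[t] += sdata[t+2]  -> quad sums in slots 0 and 4
//   s=4: thread  0       does sdata[0] += sdata[4]  -> block sum in sdata[0]
// The tid % (2*s) test is what makes this the baseline: most threads idle at every step.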
bool check(float *out,float *res,int n){
for(int i=0;i<n;i++){
if(out[i]!=res[i])
return false;
}
return true;
}
int main(){
const int N=32*1024*1024;
float *a=(float *)malloc(N*sizeof(float));
float *d_a;
cudaMalloc((void **)&d_a,N*sizeof(float));
int block_num=N/THREAD_PER_BLOCK;
float *out=(float *)malloc((N/THREAD_PER_BLOCK)*sizeof(float));
float *d_out;
cudaMalloc((void **)&d_out,(N/THREAD_PER_BLOCK)*sizeof(float));
float *res=(float *)malloc((N/THREAD_PER_BLOCK)*sizeof(float));
for(int i=0;i<N;i++){
a[i]=1;
}
for(int i=0;i<block_num;i++){
float cur=0;
for(int j=0;j<THREAD_PER_BLOCK;j++){
cur+=a[i*THREAD_PER_BLOCK+j];
}
res[i]=cur;
}
cudaMemcpy(d_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
dim3 Grid( N/THREAD_PER_BLOCK,1);
dim3 Block( THREAD_PER_BLOCK,1);
reduce0<<<Grid,Block>>>(d_a,d_out);
cudaMemcpy(out,d_out,block_num*sizeof(float),cudaMemcpyDeviceToHost);
if(check(out,res,block_num))printf("the ans is right\n");
else{
printf("the ans is wrong\n");
for(int i=0;i<block_num;i++){
printf("%lf ",out[i]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_out);
}
|
Sgemv_v0.cu | #include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <math.h>
// calculate the offset from row, col and ld; in a row-major matrix, ld is the width (leading dimension)
#define OFFSET(row, col, ld) ((row) * (ld) + (col))
// transfer float4
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0])
#define checkCudaErrors(func) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
printf ("%s %d CUDA: %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
}
template <unsigned int WarpSize>
__device__ __forceinline__ float warpReduceSum(float sum) {
if (WarpSize >= 32)sum += __shfl_down_sync(0xffffffff, sum, 16); // 0-16, 1-17, 2-18, etc.
if (WarpSize >= 16)sum += __shfl_down_sync(0xffffffff, sum, 8);// 0-8, 1-9, 2-10, etc.
if (WarpSize >= 8)sum += __shfl_down_sync(0xffffffff, sum, 4);// 0-4, 1-5, 2-6, etc.
if (WarpSize >= 4)sum += __shfl_down_sync(0xffffffff, sum, 2);// 0-2, 1-3, 4-6, 5-7, etc.
if (WarpSize >= 2)sum += __shfl_down_sync(0xffffffff, sum, 1);// 0-1, 2-3, 4-5, etc.
return sum;
}
// intended for N == 32 (one column per lane); also works when N is a multiple of 32
__global__ void Sgemv_v0(
float * __restrict__ A,
float * __restrict__ x,
float * __restrict__ y,
const int M,
const int N) {
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
const int warp_size=32;
int laneId= tx % warp_size;
int current_row = blockDim.y * bx + ty;
if(current_row < M){
float res=0;
int kIteration = N/warp_size;
if(kIteration==0) kIteration=1;
#pragma unroll
for(int i=0; i< kIteration; i++){
int current_col = i*warp_size + laneId;
res += A[current_row*N + current_col] * x[current_col];
}
res = warpReduceSum<warp_size>(res);
if(laneId==0) y[current_row]=res;
}
}
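// Worked example (assuming N = 32): kIteration = 1, so lane l multiplies A[row][l] by x[l];
// warpReduceSum then folds the 32 partial products into lane 0, which writes y[row].
// With dimBlock(32, 4), each thread block therefore produces 4 entries of y per launch.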
int main(int argc, char** argv) {
if (argc != 3) {
printf("usage: ./main [M] [N]\n");
exit(0);
}
size_t M = atoi(argv[1]);
size_t N = atoi(argv[2]);
size_t bytes_A = sizeof(float) * M * N;
size_t bytes_x = sizeof(float) * N;
size_t bytes_y = sizeof(float) * M;
float* h_A = (float*)malloc(bytes_A);
float* h_x = (float*)malloc(bytes_x);
float* h_y = (float*)malloc(bytes_y);
float* h_y1 = (float*)malloc(bytes_y);
float* d_A;
float* d_x;
float* d_y;
checkCudaErrors(cudaMalloc(&d_A, bytes_A));
checkCudaErrors(cudaMalloc(&d_x, bytes_x));
checkCudaErrors(cudaMalloc(&d_y, bytes_y));
    // generate data for A
for( int i = 0; i < M * N; i++ ) {
h_A[i] = (float)i/N;
}
    // generate data for x
for( int i = 0; i < N; i++ ) {
h_x[i] = 1;
}
memset(h_y,0,M*sizeof(float));
memset(h_y1,0,M*sizeof(float));
int nIter = 1000;
checkCudaErrors(cudaMemcpy( d_A, h_A, bytes_A, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_x, h_x, bytes_x, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy( d_y, h_y, bytes_y, cudaMemcpyHostToDevice));
for (int run = 0 ; run < nIter; run ++ ) {
dim3 dimGrid(M/4);
dim3 dimBlock(32,4);
Sgemv_v0<<< dimGrid, dimBlock >>>(d_A, d_x, d_y, M, N);
}
checkCudaErrors(cudaMemcpy( h_y, d_y, bytes_y, cudaMemcpyDeviceToHost));
// cublas
cublasHandle_t blas_handle;
cublasCreate(&blas_handle);
float alpha = 1.0;
float beta = 0;
checkCudaErrors(cudaMemcpy( d_y, h_y1, bytes_y, cudaMemcpyHostToDevice));
for (int run = 0 ; run < nIter; run ++ ) {
cublasSgemv (blas_handle, CUBLAS_OP_T,
N, M, &alpha,
d_A, N, d_x, 1, &beta, d_y, 1
);
}
checkCudaErrors(cudaMemcpy( h_y1, d_y, bytes_y, cudaMemcpyDeviceToHost));
cublasDestroy(blas_handle);
double eps = 1.e-6; // machine zero
bool correct = true;
for (int i = 0; i < M; i++) {
double abs_err = fabs(h_y[i] - h_y1[i]);
double dot_length = M;
double abs_val = fabs(h_y[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_y[i], h_y1[i], eps);
correct = false;
break;
}
}
printf("%s\n", correct ? "Result= PASS" : "Result= FAIL");
// Free Memory
cudaFree(d_A);
cudaFree(d_x);
cudaFree(d_y);
free(h_A);
free(h_x);
free(h_y);
free(h_y1);
}
|
noop_jacobi_27pt.cu | //--------------------------------------------------------------------------------------------
// Author: Adam Barker email: [email protected]
//
// File: noop_jacobi_27pt.cu date: June 24, 2014
//
// This program performs a simple averaging of node values using a 27-point 3D Jacobi stencil.
// This program also incorporates the use of shared memory to speed up memory accesses and
// staggers reads of the halo regions so that race conditions do not exist among threads.
//
// This program contains no advanced optimizations.
//--------------------------------------------------------------------------------------------
#include <stdio.h>
#include <output.c>
#define CURRENT ix + iy*N + iz*N*N
#define MIDDLE N/2 + N*(N/2) + N*N*(N/2)
#define Z 4
#define Y 8
#define X 8
__global__ void kernel(float * d_data, const int N)
{
// Global data coordinate variables
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int iz = threadIdx.z + blockIdx.z * blockDim.z;
// Local data coordinate variables
int tx = threadIdx.x + 1;
int ty = threadIdx.y + 1;
int tz = threadIdx.z + 1;
// Shared memory allocation
__shared__ float s_data[Z+2][Y+2][X+2];
// Current node
float curr;
// Set 1: nodes 1 unit away [6]
float right;
float left;
float up;
float down;
float front;
float back;
// Set 2: nodes sqrt(2) units away (midpoints) [12]
float right_front; float right_up; float front_up;
float right_back; float right_down; float front_down;
float left_front; float left_up; float back_up;
float left_back; float left_down; float back_down;
// Set 3: nodes sqrt(3) units away (corners) [8]
float right_front_up; float left_front_up;
float right_front_down; float left_front_down;
float right_back_up; float left_back_up;
float right_back_down; float left_back_down;
    // Coefficient to multiply the 27-node sum by (simple average)
float coef = (1.0f/27.0f);
// Get current node's value, place into local and shared memory
curr = d_data[CURRENT];
s_data[tz][ty][tx] = curr;
__syncthreads();
// Don't try to perform calculations on edges of data
if(ix == 0 || iy == 0 || ix == N-1 || iy == N-1 || iz == 0 || iz == N-1) return;
    // Skip the middle node so it keeps its higher value and acts as a constant source (e.g. constant heat)
if(ix + iy*N + iz*N*N == MIDDLE) return;
// Get halo regions and place them into shared memory
// Upper midpoints + corners
if(ty == 1 && iy > 0) {
if(tx == 1 && ix > 0) {
if(tz == 1 && iz > 0) s_data[tz-1][ty-1][tx-1] = d_data[CURRENT - 1 - N - N*N];
if(tz == Z && iz < N-1) s_data[tz+1][ty-1][tx-1] = d_data[CURRENT - 1 - N + N*N];
s_data[tz][ty-1][tx-1] = d_data[CURRENT - 1 - N];
}
if(tx == X && ix < N-1) {
if(tz == 1 && iz > 0) s_data[tz-1][ty-1][tx+1] = d_data[CURRENT + 1 - N - N*N];
if(tz == Z && iz < N-1) s_data[tz+1][ty-1][tx+1] = d_data[CURRENT + 1 - N + N*N];
s_data[tz][ty-1][tx+1] = d_data[CURRENT + 1 - N];
}
if(tz == 1 && iz > 0) s_data[tz-1][ty-1][tx] = d_data[CURRENT - N - N*N];
if(tz == Z && iz < N-1) s_data[tz+1][ty-1][tx] = d_data[CURRENT - N + N*N];
s_data[tz][ty-1][tx] = d_data[CURRENT - N];
}
__syncthreads();
// Lower midpoints + corners
if(ty == Y && iy < N-1) {
if(tx == 1 && ix > 0) {
if(tz == 1 && iz > 0) s_data[tz-1][ty+1][tx-1] = d_data[CURRENT - 1 + N - N*N];
if(tz == Z && iz < N-1) s_data[tz+1][ty+1][tx-1] = d_data[CURRENT - 1 + N + N*N];
        s_data[tz][ty+1][tx-1] = d_data[CURRENT - 1 + N];
}
if(tx == X && ix < N-1) {
if(tz == 1 && iz > 0) s_data[tz-1][ty+1][tx+1] = d_data[CURRENT + 1 + N - N*N];
if(tz == Z && iz < N-1) s_data[tz+1][ty+1][tx+1] = d_data[CURRENT + 1 + N + N*N];
        s_data[tz][ty+1][tx+1] = d_data[CURRENT + 1 + N];
}
if(tz == 1 && iz > 0) s_data[tz-1][ty+1][tx] = d_data[CURRENT + N - N*N];
if(tz == Z && iz < N-1) s_data[tz+1][ty+1][tx] = d_data[CURRENT + N + N*N];
s_data[tz][ty+1][tx] = d_data[CURRENT + N];
}
__syncthreads();
// Side midpoints
if(tx == 1 && ix > 0) {
if(tz == 1 && iz > 0) s_data[tz-1][ty][tx-1] = d_data[CURRENT - 1 - N*N];
if(tz == Z && iz < N-1) s_data[tz+1][ty][tx-1] = d_data[CURRENT - 1 + N*N];
s_data[tz][ty][tx-1] = d_data[CURRENT - 1];
}
if(tx == X && ix < N-1) {
if(tz == 1 && iz > 0) s_data[tz-1][ty][tx+1] = d_data[CURRENT + 1 - N*N];
    if(tz == Z && iz < N-1) s_data[tz+1][ty][tx+1] = d_data[CURRENT + 1 + N*N];
s_data[tz][ty][tx+1] = d_data[CURRENT + 1];
}
__syncthreads();
// Front and back halos
if(tz == 1 && iz > 0) s_data[tz-1][ty][tx] = d_data[CURRENT - N*N];
if(tz == Z && iz < N-1) s_data[tz+1][ty][tx] = d_data[CURRENT + N*N];
__syncthreads();
// Place node values into local variables
// Local nodes (1 unit away)
right = s_data[tz][ty][tx+1]; __syncthreads();
left = s_data[tz][ty][tx-1]; __syncthreads();
up = s_data[tz][ty+1][tx]; __syncthreads();
down = s_data[tz][ty-1][tx]; __syncthreads();
front = s_data[tz+1][ty][tx]; __syncthreads();
back = s_data[tz-1][ty][tx]; __syncthreads();
// Midpoints (sqrt(2) units away)
right_front = s_data[tz+1][ty][tx+1]; __syncthreads();
right_back = s_data[tz-1][ty][tx+1]; __syncthreads();
right_up = s_data[tz][ty+1][tx+1]; __syncthreads();
right_down = s_data[tz][ty-1][tx+1]; __syncthreads();
left_front = s_data[tz+1][ty][tx-1]; __syncthreads();
left_back = s_data[tz-1][ty][tx-1]; __syncthreads();
left_up = s_data[tz][ty+1][tx-1]; __syncthreads();
left_down = s_data[tz][ty-1][tx-1]; __syncthreads();
front_up = s_data[tz+1][ty+1][tx]; __syncthreads();
front_down = s_data[tz+1][ty-1][tx]; __syncthreads();
back_up = s_data[tz-1][ty+1][tx]; __syncthreads();
    back_down = s_data[tz-1][ty-1][tx]; __syncthreads();
// Corners (sqrt(3) units away)
right_front_up = s_data[tz+1][ty+1][tx+1]; __syncthreads();
right_front_down = s_data[tz+1][ty-1][tx+1]; __syncthreads();
right_back_up = s_data[tz-1][ty+1][tx+1]; __syncthreads();
right_back_down = s_data[tz-1][ty-1][tx+1]; __syncthreads();
left_front_up = s_data[tz+1][ty+1][tx-1]; __syncthreads();
left_front_down = s_data[tz+1][ty-1][tx-1]; __syncthreads();
left_back_up = s_data[tz-1][ty+1][tx-1]; __syncthreads();
left_back_down = s_data[tz-1][ty-1][tx-1]; __syncthreads();
// Compute output and place into curr and write to smem
curr += (right + left + up + down + front + back);
curr += (right_front + right_back + right_up + right_down +
left_front + left_back + left_up + left_down +
front_up + front_down + back_up + back_down);
curr += (right_front_up + right_front_down + right_back_up + right_back_down +
left_front_up + left_front_down + left_back_up + left_back_down);
curr *= coef;
s_data[tz][ty][tx] = curr;
__syncthreads();
//Write data back to global mem
d_data[CURRENT] = curr;
__syncthreads();
}
int main(int argc, char* *argv)
{
if(argc != 3) {printf("USAGE: %s <size> <steps>\n", argv[0]); return 10;}
const int N = atoi(argv[1]); // Data dimensions (N * N * N);
const int STEPS = atoi(argv[2]); // Number of iterations to perform.
// constants to hold grid and threadblock dimensions
const dim3 blocks ( N/X, N/Y, N/Z );
const dim3 threads( X, Y, Z );
// constant to hold size of data in bytes
const int ARRAY_BYTES = N * N * N * sizeof(float);
// arrays to hold the data to perform compuation on.
float * h_data;
float * d_data;
h_data = (float*)malloc(ARRAY_BYTES);
cudaMalloc(&d_data, ARRAY_BYTES);
// Initialize array
for(int k=0; k < N; k++) {
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
h_data[i + j*N + k*N*N] = 5.0f;
}
}
}
// Place a differing value into middle of array that will spread.
h_data[MIDDLE] = 10.0f;
// Copy data to the device.
cudaMemcpy(d_data, h_data, ARRAY_BYTES, cudaMemcpyHostToDevice);
for(int i=0; i<STEPS; i++) kernel<<<blocks, threads>>>(d_data, N);
// Copy data back from device to the host.
cudaMemcpy(h_data, d_data, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// Output the data into out.image
output("out.image", h_data, N, N, N);
free(h_data);
cudaFree(d_data);
return 0;
}
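// Build/run sketch (assumes output.c sits next to this file and that directory is on the include path):
//   nvcc -O3 -I. noop_jacobi_27pt.cu -o jacobi27
//   ./jacobi27 64 100
// <size> should be a multiple of the block dims (X=8, Y=8, Z=4) so blocks(N/X, N/Y, N/Z) tiles the data exactly.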
|
noop_jacobi_7pt.cu | //--------------------------------------------------------------------------------------------
// Author: Adam Barker email: [email protected]
//
// File: noop_jacobi_7pt.cu date: July 1, 2014
//
// This program performs a simple averaging of node values using a 7-point 3D Jacobi stencil.
// This program also incorporates the use of shared memory to speed up memory accesses and
// staggers reads of the halo regions so that race conditions do not exist among threads.
//
// This program contains no advanced optimizations.
//--------------------------------------------------------------------------------------------
#include <stdio.h>
#include <output.c>
#define INIT 5.0f
#define START 10.0f
///////////////////////////////////////////////////////////////
/// This function is the actual stencil kernel that ///
/// performs an averaging 7 point stencil on the input data ///
///////////////////////////////////////////////////////////////
__global__
void stencil(float * d_data, const int dx, const int dy, const int dz, // global data dimensions
const int bx, const int by, const int bz ) // shared data dimensions
{
// Global data coordinates
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int iz = threadIdx.z + blockIdx.z * blockDim.z;
// Local data coordinates with halo radius added
const int tx = threadIdx.x + 1;
const int ty = threadIdx.y + 1;
const int tz = threadIdx.z + 1;
// global and shared memory location constants
const int CURRENT_G = ix + iy*dx + iz*dx*dy;
const int CURRENT_S = tx + ty*bx + tz*bx*by;
// Dynamic shared memory declaration
extern __shared__ float s_data[];
// local node variable declarations
float curr; // Current node
float right; // node right of current node
float left; // node left of current node
float up; // node above current node
float down; // node below current node
float front; // node in front of current node
float back; // node behind current node
// number to multiply nodes by (average)
const float coef = 1.0f/7.0f;
curr = d_data[CURRENT_G]; // fetch current node value from global memory
s_data[CURRENT_S] = curr; // place into shared memory
__syncthreads();
// Don't perform calculations on edge nodes or the middle node
if( (ix == 0 || ix == dx-1 || iy == 0 || iy == dy-1 || iz == 0 || iz == dz-1)
|| (CURRENT_G == dx/2 + dx*(dy/2) + dx*dy*(dz/2)) ) return;
/*******************************
* Load halo regions into smem *
*******************************/
// halo region to the left and right of this block
if(tx == 1) s_data[CURRENT_S - 1] = d_data[CURRENT_G - 1];
if(tx == bx-2) s_data[CURRENT_S + 1] = d_data[CURRENT_G + 1];
__syncthreads();
// halo region above and below this block
if(ty == 1) s_data[CURRENT_S - bx] = d_data[CURRENT_G - dx];
if(ty == by-2) s_data[CURRENT_S + bx] = d_data[CURRENT_G + dx];
__syncthreads();
// halo region behind and in front of this block
if(tz == 1) s_data[CURRENT_S - bx*by] = d_data[CURRENT_G - dx*dy];
if(tz == bz-2) s_data[CURRENT_S + bx*by] = d_data[CURRENT_G + dx*dy];
__syncthreads();
/**********************************
* retrieve node values from smem *
**********************************/
right = s_data[CURRENT_S + 1]; __syncthreads();
left = s_data[CURRENT_S - 1]; __syncthreads();
up = s_data[CURRENT_S - bx]; __syncthreads();
down = s_data[CURRENT_S + bx]; __syncthreads();
front = s_data[CURRENT_S + bx*by]; __syncthreads();
back = s_data[CURRENT_S - bx*by]; __syncthreads();
/**********************
* Perform compuation *
**********************/
    curr = coef * (curr + right + left + up + down + front + back);
__syncthreads();
// Write result back to global memory
d_data[CURRENT_G] = curr;
__syncthreads();
}
///////////////////////////////////////////////////////////////
/// This function initializes an array with set values in a ///
/// parallel fashion for speed up over CPU initialization ///
///////////////////////////////////////////////////////////////
__global__
void initialize(float * d_data, const int dx, const int dy, const int dz)
{
// Global coordinates
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int iz = threadIdx.z + blockIdx.z * blockDim.z;
// Write location
int CURRENT_G = ix + iy*dx + iz*dx*dy;
// place initial value into data
d_data[CURRENT_G] = INIT;
// if at the middle of the array, write starting value
    if(CURRENT_G == dx/2 + dx*(dy/2) + dx*dy*(dz/2))
d_data[CURRENT_G] = START;
}
///////////////////////////////////////////////////////////////
/// This is the main function which handles argument ///
/// parsing and kernel launching. ///
///////////////////////////////////////////////////////////////
int main(int argc, char* *argv)
{
if(argc != 8) {printf("USAGE: %s <bx> <by> <bz> <tx> <ty> <tz> <steps>\n", argv[0]); return 10;}
// set constants from command line arguments
const int bx = atoi(argv[1]);
const int by = atoi(argv[2]);
const int bz = atoi(argv[3]);
const int tx = atoi(argv[4]) + 2;
const int ty = atoi(argv[5]) + 2;
const int tz = atoi(argv[6]) + 2;
const int STEPS = atoi(argv[7]);
const int dx = bx*(tx-2);
const int dy = by*(ty-2);
const int dz = bz*(tz-2);
// number of blocks and threads per block for kernel execution
const dim3 blocks (bx, by, bz);
const dim3 threads(tx-2, ty-2, tz-2);
// Array size and shared mem size declarations
const int ARRAY_BYTES = dx * dy * dz * sizeof(float);
const int SHARED_BYTES = tx * ty * tz * sizeof(float);
// Host and device array declarations & allocations
float * h_data;
float * d_data;
printf("DATA DIMENSIONS: %d x %d x %d\n", dx, dy, dz);
h_data = (float*)malloc(ARRAY_BYTES);
cudaMalloc(&d_data, ARRAY_BYTES);
initialize<<<blocks, threads>>>(d_data, dx, dy, dz);
for(int step=0; step < STEPS; step++)
stencil<<<blocks, threads, SHARED_BYTES>>>(d_data, dx, dy, dz, tx, ty, tz);
cudaMemcpy(h_data, d_data, ARRAY_BYTES, cudaMemcpyDeviceToHost);
output("out.image", h_data, dx, dy, dz);
free(h_data);
cudaFree(d_data);
cudaDeviceReset();
return 0;
}
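// Invocation sketch (the binary name and values are an example, not from the source): with
//   ./jacobi7 8 8 8 8 8 8 100
// the block grid is 8x8x8, each block runs 8x8x8 threads, and the data volume is
// dx x dy x dz = (8*8)^3 = 64^3 nodes, iterated for 100 steps.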
|
noop_jacobi_5pt.cu | //--------------------------------------------------------------------------------------------
// Author: Adam Barker email: [email protected]
//
// File: noop_jacobi_5pt.cu date: June 23, 2014
//
// This program performs a simple averaging of node values using a 5-point 2D Jacobi stencil.
// This program also incorporates the use of shared memory to speed up memory accesses and
// staggers reads of the halo regions so that race conditions do not exist among threads.
//
// This program contains no advanced optimizations.
//--------------------------------------------------------------------------------------------
#include <stdio.h>
#include <output.c>
#define K 16 // Thread block dimension (K * K)
#define MIDDLE N/2 + N*(N/2)
__global__ void kernel(float * d_data, const int N)
{
// get global x and y values
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
// Get local x and y values in the block and offset by halo radius.
int tx = threadIdx.x + 1;
int ty = threadIdx.y + 1;
// Place local nodes into local variables (registers)
float curr; // current node
float right; // node right of the current node
float left; // node left of the current node
float up; // node above the current node
float down; // node below the current node
// value to multiply node values by when performing stencil computation (average).
float coef = 0.2f;
// Allocate shared memory to include halo region.
__shared__ float s_data[K+2][K+2];
// get current node from global memory and place into local variable.
curr = d_data[ix + iy*N];
// place current node into shared memory so other threads can access it.
s_data[ty][tx] = curr;
__syncthreads();
// Place left halo into shared memory
    if(tx == 1 && ix > 0) {   // guard so edge blocks don't read out of bounds
s_data[ty][tx-1] = d_data[(ix-1) + iy*N];
}
__syncthreads();
// Place lower halo into shared memory
    if(ty == 1 && iy > 0) {
s_data[ty-1][tx] = d_data[ix + (iy-1)*N];
}
__syncthreads();
// Place right halo into shared memory
    if(tx == K && ix < N-1) {
s_data[ty][tx+1] = d_data[(ix+1) + iy*N];
}
__syncthreads();
// Place upper halo into shared memory
    if(ty == K && iy < N-1) {
s_data[ty+1][tx] = d_data[ix + (iy+1)*N];
}
__syncthreads();
// Retrieve local nodes from shared memory and place into local variables.
right = s_data[ty][tx+1];
left = s_data[ty][tx-1];
up = s_data[ty+1][tx];
down = s_data[ty-1][tx];
__syncthreads();
// Don't try to perform calculations on edges of data
if(ix == 0 || iy == 0 || ix == N-1 || iy == N-1) return;
    // Skip the middle node so it keeps its higher value and acts as a constant source (e.g. constant heat)
if(ix + iy*N == MIDDLE) return;
// Compute output value
curr = coef * (curr + right + left + up + down);
__syncthreads();
// update shared and global memory with new values.
s_data[ty][tx] = curr;
__syncthreads();
d_data[ix + iy*N] = s_data[ty][tx];
__syncthreads();
}
int main(int argc, char* *argv)
{
// Make sure enough inputs were given by the user.
    if(argc != 3) { printf("USAGE: %s <size> <steps>\n", argv[0]); return 10; }
const int N = atoi(argv[1]);
const int STEPS = atoi(argv[2]);
// Set the number of blocks in the grid and the number of threads per block.
const dim3 blocks (N/K, N/K);
const dim3 threads(K, K);
const int ARRAY_BYTES = N * N * sizeof(float);
// These variables are the host array and the device array for doing computations on.
float * h_data;
float * d_data;
// Allocate data for the arrays
h_data = (float*)malloc(ARRAY_BYTES);
cudaMalloc(&d_data, ARRAY_BYTES);
// Place initial values into h_data
for(int j=0; j<N; j++) {
for(int i=0; i<N; i++) {
h_data[i + j*N] = 5.0f;
}
}
// place an initial higher value into the middle of the array.
h_data[MIDDLE] = 10.0f;
// Copy h_data onto the device's array d_data
cudaMemcpy(d_data, h_data, ARRAY_BYTES, cudaMemcpyHostToDevice);
// Start kernel and iterate for the given number of steps.
for(int i=0; i<STEPS; i++) kernel<<<blocks, threads>>>(d_data, N);
// Copy d_data back into the host's array h_data
cudaMemcpy(h_data, d_data, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// Output h_data
output("out.image", h_data, N, N, 1);
free(h_data);
cudaFree(d_data);
return 0;
}
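// Build/run sketch (file and binary names are assumptions):
//   nvcc -O3 -I. noop_jacobi_5pt.cu -o jacobi5
//   ./jacobi5 256 100
// <size> must be a multiple of K (16) so blocks(N/K, N/K) covers the whole N x N grid.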
|