gpu_common.h File Reference
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <iostream>

Go to the source code of this file.

Classes

struct  compile_time_device_info
 

Functions

template<typename Kernel , typename Input , typename Output >
void run_on_cpu (const Kernel &ker, int n, const Input &in, Output &out)
 
template<typename Kernel , typename Input , typename Output >
__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void run_on_gpu_meta_kernel (const Kernel ker, int n, const Input *in, Output *out)
 
template<typename Kernel , typename Input , typename Output >
void run_on_gpu (const Kernel &ker, int n, const Input &in, Output &out)
 
template<typename Kernel , typename Input , typename Output >
void run_and_compare_to_gpu (const Kernel &ker, int n, const Input &in, Output &out)
 
void ei_test_init_gpu ()
 

Variables

dim3 threadIdx
 
dim3 blockDim
 
dim3 blockIdx
 

Function Documentation

◆ ei_test_init_gpu()

void ei_test_init_gpu ( )
{
  int device = 0;
  gpuDeviceProp_t deviceProp;
  gpuGetDeviceProperties(&deviceProp, device);

  ArrayXi dummy(1), info(10);
  info = -1;
  // Run the compile_time_device_info kernel to fill `info` on the device.
  run_on_gpu(compile_time_device_info(), 10, dummy, info);

  std::cout << "GPU compile-time info:\n";

#ifdef EIGEN_CUDACC
  std::cout << " EIGEN_CUDACC: " << int(EIGEN_CUDACC) << "\n";
#endif

#ifdef EIGEN_CUDA_SDK_VER
  std::cout << " EIGEN_CUDA_SDK_VER: " << int(EIGEN_CUDA_SDK_VER) << "\n";
#endif

#if EIGEN_COMP_NVCC
  std::cout << " EIGEN_COMP_NVCC: " << int(EIGEN_COMP_NVCC) << "\n";
#endif

#ifdef EIGEN_HIPCC
  std::cout << " EIGEN_HIPCC: " << int(EIGEN_HIPCC) << "\n";
#endif

  std::cout << " EIGEN_CUDA_ARCH: " << info[0] << "\n";
  std::cout << " EIGEN_HIP_DEVICE_COMPILE: " << info[1] << "\n";

  std::cout << "GPU device info:\n";
  std::cout << " name: " << deviceProp.name << "\n";
  std::cout << " capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
  std::cout << " multiProcessorCount: " << deviceProp.multiProcessorCount << "\n";
  std::cout << " maxThreadsPerMultiProcessor: " << deviceProp.maxThreadsPerMultiProcessor << "\n";
  std::cout << " warpSize: " << deviceProp.warpSize << "\n";
  std::cout << " regsPerBlock: " << deviceProp.regsPerBlock << "\n";
  std::cout << " concurrentKernels: " << deviceProp.concurrentKernels << "\n";
  std::cout << " clockRate: " << deviceProp.clockRate << "\n";
  std::cout << " canMapHostMemory: " << deviceProp.canMapHostMemory << "\n";
  std::cout << " computeMode: " << deviceProp.computeMode << "\n";
}

References EIGEN_COMP_NVCC, EIGEN_CUDA_SDK_VER, info, int(), and run_on_gpu().
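
In the GPU unit tests this function is typically called once, before any kernels run, so that the compile-time and device information appears at the top of the test log. A minimal sketch, assuming the standard Eigen test harness (EIGEN_DECLARE_TEST and main.h belong to the test framework, not to this header):

#include "main.h"
#include "gpu_common.h"

EIGEN_DECLARE_TEST(gpu_example)
{
  ei_test_init_gpu();  // print compile-time and device info first
  // ... launch the actual GPU subtests here ...
}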

◆ run_and_compare_to_gpu()

template<typename Kernel , typename Input , typename Output >
void run_and_compare_to_gpu ( const Kernel &  ker,
int  n,
const Input &  in,
Output &  out 
)
{
  Input in_ref, in_gpu;
  Output out_ref, out_gpu;
#if !defined(EIGEN_GPU_COMPILE_PHASE)
  in_ref = in_gpu = in;
  out_ref = out_gpu = out;
#else
  EIGEN_UNUSED_VARIABLE(in);
  EIGEN_UNUSED_VARIABLE(out);
#endif
  run_on_cpu(ker, n, in_ref, out_ref);
  run_on_gpu(ker, n, in_gpu, out_gpu);
#if !defined(EIGEN_GPU_COMPILE_PHASE)
  VERIFY_IS_APPROX(in_ref, in_gpu);
  VERIFY_IS_APPROX(out_ref, out_gpu);
#endif
}

References EIGEN_UNUSED_VARIABLE, n, out(), run_on_cpu(), run_on_gpu(), and VERIFY_IS_APPROX.
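
The kernel argument can be any functor callable as ker(i, in_ptr, out_ptr) on both host and device. A minimal sketch of a caller, using a hypothetical coefficient-wise functor (square_kernel is not part of this header) and Eigen::ArrayXf containers:

struct square_kernel {
  template <typename T>
  EIGEN_DEVICE_FUNC void operator()(int i, const T* in, T* out) const {
    out[i] = in[i] * in[i];  // one output coefficient per index
  }
};

void test_square()
{
  const int n = 100;
  Eigen::ArrayXf in = Eigen::ArrayXf::Random(n);
  Eigen::ArrayXf out = Eigen::ArrayXf::Zero(n);
  // Runs the functor on the CPU and on the GPU, then checks with
  // VERIFY_IS_APPROX that inputs and outputs of both runs agree.
  run_and_compare_to_gpu(square_kernel(), n, in, out);
}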

◆ run_on_cpu()

template<typename Kernel , typename Input , typename Output >
void run_on_cpu ( const Kernel &  ker,
int  n,
const Input &  in,
Output &  out 
)
{
  for (int i = 0; i < n; i++) ker(i, in.data(), out.data());
}

References i, n, and out().

Referenced by Eigen::run(), run_and_compare_to_gpu(), and Eigen::run_with_hint().
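
The Input and Output arguments only need to expose data() here (the GPU variants additionally require size() and a Scalar typedef), so Eigen dense objects can be passed directly. A host-only sketch, reusing the hypothetical square_kernel from the example above:

void host_only_example()
{
  const int n = 8;
  Eigen::VectorXf in = Eigen::VectorXf::LinSpaced(n, 1.f, 8.f);
  Eigen::VectorXf out(n);
  run_on_cpu(square_kernel(), n, in, out);  // out[i] == in[i] * in[i]
}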

◆ run_on_gpu()

template<typename Kernel , typename Input , typename Output >
void run_on_gpu ( const Kernel &  ker,
int  n,
const Input &  in,
Output &  out 
)
{
  typename Input::Scalar* d_in;
  typename Output::Scalar* d_out;
  std::ptrdiff_t in_bytes = in.size() * sizeof(typename Input::Scalar);
  std::ptrdiff_t out_bytes = out.size() * sizeof(typename Output::Scalar);

  gpuMalloc((void**)(&d_in), in_bytes);
  gpuMalloc((void**)(&d_out), out_bytes);

  gpuMemcpy(d_in, in.data(), in_bytes, gpuMemcpyHostToDevice);
  gpuMemcpy(d_out, out.data(), out_bytes, gpuMemcpyHostToDevice);

  // Simple and non-optimal 1D mapping assuming n is not too large
  // That's only for unit testing!
  dim3 Blocks(128);
  dim3 Grids((n + int(Blocks.x) - 1) / int(Blocks.x));

  gpuDeviceSynchronize();

#ifdef EIGEN_USE_HIP
  hipLaunchKernelGGL(HIP_KERNEL_NAME(run_on_gpu_meta_kernel<Kernel, typename std::decay<decltype(*d_in)>::type,
                                                            typename std::decay<decltype(*d_out)>::type>),
                     dim3(Grids), dim3(Blocks), 0, 0, ker, n, d_in, d_out);
#else
  // Various versions of clang-format incorrectly add spaces to the kernel launch brackets.
  // clang-format off
  run_on_gpu_meta_kernel<<<Grids, Blocks>>>(ker, n, d_in, d_out);
  // clang-format on
#endif
  // Pre-launch errors.
  gpuError_t err = gpuGetLastError();
  if (err != gpuSuccess) {
    printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err));
    gpu_assert(false);
  }

  // Kernel execution errors.
  err = gpuDeviceSynchronize();
  if (err != gpuSuccess) {
    printf("%s: %s\n", gpuGetErrorName(err), gpuGetErrorString(err));
    gpu_assert(false);
  }

  // check inputs have not been modified
  gpuMemcpy(const_cast<typename Input::Scalar*>(in.data()), d_in, in_bytes, gpuMemcpyDeviceToHost);
  gpuMemcpy(out.data(), d_out, out_bytes, gpuMemcpyDeviceToHost);

  gpuFree(d_in);
  gpuFree(d_out);
}

References n, out(), run_on_gpu_meta_kernel(), and compute_granudrum_aor::type.

Referenced by ei_test_init_gpu(), Eigen::run(), and run_and_compare_to_gpu().
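
The gpu*-prefixed calls are aliases from Eigen's GPU/HIP compatibility defines that resolve to the corresponding cuda* or hip* runtime functions depending on whether EIGEN_USE_HIP is defined, so the same body serves both back ends. The 1D launch configuration simply rounds n up to a multiple of the fixed 128-thread block size; a worked illustration of that arithmetic (numbers chosen for the example only):

int n = 1000;
dim3 Blocks(128);
dim3 Grids((n + int(Blocks.x) - 1) / int(Blocks.x));  // (1000 + 127) / 128 = 8 blocks
// 8 * 128 = 1024 threads are launched; the i < n guard in
// run_on_gpu_meta_kernel leaves the trailing 24 threads idle.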

◆ run_on_gpu_meta_kernel()

template<typename Kernel , typename Input , typename Output >
__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void run_on_gpu_meta_kernel ( const Kernel  ker,
int  n,
const Input *  in,
Output *  out 
)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) {
    ker(i, in, out);
  }
}

References blockDim, blockIdx, i, n, out(), and threadIdx.

Referenced by run_on_gpu().
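
The index computation is the standard 1D global-thread-index idiom. A worked example with the 128-thread blocks used by run_on_gpu (numbers are for illustration only):

// blockDim.x = 128, blockIdx.x = 2, threadIdx.x = 5
//   i = 5 + 2 * 128 = 261
// With n = 200 this thread fails the i < n guard and does nothing;
// with n = 1000 it calls ker(261, in, out).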

Variable Documentation

◆ blockDim

dim3 blockDim

◆ blockIdx

dim3 blockIdx

◆ threadIdx

dim3 threadIdx