cxx11_tensor_math_sycl.cpp File Reference
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>

Macros

#define EIGEN_TEST_NO_LONGDOUBLE
 
#define EIGEN_TEST_NO_COMPLEX
 
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE   int64_t
 
#define EIGEN_USE_SYCL
 

Functions

template<typename DataType , int DataLayout, typename IndexType >
static void test_tanh_sycl (const Eigen::SyclDevice &sycl_device)
 
template<typename DataType , int DataLayout, typename IndexType >
static void test_sigmoid_sycl (const Eigen::SyclDevice &sycl_device)
 
template<typename DataType , typename dev_Selector >
void sycl_computing_test_per_device (dev_Selector s)
 
 EIGEN_DECLARE_TEST (cxx11_tensor_math_sycl)
 

Macro Definition Documentation

◆ EIGEN_DEFAULT_DENSE_INDEX_TYPE

#define EIGEN_DEFAULT_DENSE_INDEX_TYPE   int64_t

◆ EIGEN_TEST_NO_COMPLEX

#define EIGEN_TEST_NO_COMPLEX

◆ EIGEN_TEST_NO_LONGDOUBLE

#define EIGEN_TEST_NO_LONGDOUBLE

◆ EIGEN_USE_SYCL

#define EIGEN_USE_SYCL
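
These configuration macros only take effect if they are defined before the Eigen headers are pulled in. The sketch below shows the include order implied by the macro and include lists above; the exact layout of the source file may differ slightly.

#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL

#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>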

Function Documentation

◆ EIGEN_DECLARE_TEST()

EIGEN_DECLARE_TEST ( cxx11_tensor_math_sycl  )
{
  for (const auto& device : Eigen::get_sycl_supported_devices()) {
    CALL_SUBTEST(sycl_computing_test_per_device<half>(device));
    CALL_SUBTEST(sycl_computing_test_per_device<float>(device));
  }
}

References CALL_SUBTEST.

◆ sycl_computing_test_per_device()

template<typename DataType , typename dev_Selector >
void sycl_computing_test_per_device ( dev_Selector  s)
{
  QueueInterface queueInterface(s);
  auto sycl_device = Eigen::SyclDevice(&queueInterface);
  test_tanh_sycl<DataType, RowMajor, int64_t>(sycl_device);
  test_tanh_sycl<DataType, ColMajor, int64_t>(sycl_device);
  test_sigmoid_sycl<DataType, RowMajor, int64_t>(sycl_device);
  test_sigmoid_sycl<DataType, ColMajor, int64_t>(sycl_device);
}

References s.
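
A minimal usage sketch, assuming the device selector comes from Eigen::get_sycl_supported_devices() as in EIGEN_DECLARE_TEST above; the single-device variant shown here is illustrative, not part of the test:

// Run the per-device math tests on one chosen SYCL device.
auto devices = Eigen::get_sycl_supported_devices();
if (!devices.empty()) {
  sycl_computing_test_per_device<float>(devices[0]);
}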

◆ test_sigmoid_sycl()

template<typename DataType , int DataLayout, typename IndexType >
static void test_sigmoid_sycl ( const Eigen::SyclDevice &  sycl_device)
static
{
  IndexType sizeDim1 = 4;
  IndexType sizeDim2 = 4;
  IndexType sizeDim3 = 1;
  array<IndexType, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
  Tensor<DataType, 3, DataLayout, IndexType> in(tensorRange);
  Tensor<DataType, 3, DataLayout, IndexType> out(tensorRange);
  Tensor<DataType, 3, DataLayout, IndexType> out_cpu(tensorRange);

  in = in.random();

  DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(in.size() * sizeof(DataType)));
  DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(out.size() * sizeof(DataType)));

  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu1(gpu_data1, tensorRange);
  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu2(gpu_data2, tensorRange);

  sycl_device.memcpyHostToDevice(gpu_data1, in.data(), (in.size()) * sizeof(DataType));
  gpu2.device(sycl_device) = gpu1.sigmoid();
  sycl_device.memcpyDeviceToHost(out.data(), gpu_data2, (out.size()) * sizeof(DataType));

  out_cpu = in.sigmoid();

  for (int i = 0; i < in.size(); ++i) {
    VERIFY_IS_APPROX(out(i), out_cpu(i));
  }
}

References Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::data(), Eigen::TensorBase< Derived, AccessLevel >::device(), i, out(), Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::size(), and VERIFY_IS_APPROX.
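
The CPU reference relies on sigmoid() being the coefficient-wise logistic function 1 / (1 + exp(-x)). A host-only sketch of that equivalence, with a hypothetical function name and tensor size chosen for illustration:

#include <unsupported/Eigen/CXX11/Tensor>

void sigmoid_reference_sketch() {
  Eigen::Tensor<float, 1> x(8);
  x.setRandom();
  // What the test uses as the CPU reference.
  Eigen::Tensor<float, 1> ref = x.sigmoid();
  // The explicit logistic formula, written with coefficient-wise tensor ops.
  Eigen::Tensor<float, 1> manual = ((-x).exp() + x.constant(1.0f)).inverse();
  // ref(i) and manual(i) agree up to floating-point tolerance.
}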

◆ test_tanh_sycl()

template<typename DataType , int DataLayout, typename IndexType >
static void test_tanh_sycl ( const Eigen::SyclDevice &  sycl_device)
static
{
  IndexType sizeDim1 = 4;
  IndexType sizeDim2 = 4;
  IndexType sizeDim3 = 1;
  array<IndexType, 3> tensorRange = {{sizeDim1, sizeDim2, sizeDim3}};
  Tensor<DataType, 3, DataLayout, IndexType> in(tensorRange);
  Tensor<DataType, 3, DataLayout, IndexType> out(tensorRange);
  Tensor<DataType, 3, DataLayout, IndexType> out_cpu(tensorRange);

  in = in.random();

  DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(in.size() * sizeof(DataType)));
  DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(out.size() * sizeof(DataType)));

  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu1(gpu_data1, tensorRange);
  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu2(gpu_data2, tensorRange);

  sycl_device.memcpyHostToDevice(gpu_data1, in.data(), (in.size()) * sizeof(DataType));
  gpu2.device(sycl_device) = gpu1.tanh();
  sycl_device.memcpyDeviceToHost(out.data(), gpu_data2, (out.size()) * sizeof(DataType));

  out_cpu = in.tanh();

  for (int i = 0; i < in.size(); ++i) {
    VERIFY_IS_APPROX(out(i), out_cpu(i));
  }
}

References Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::data(), Eigen::TensorBase< Derived, AccessLevel >::device(), i, out(), Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::size(), and VERIFY_IS_APPROX.
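
Both test_tanh_sycl and test_sigmoid_sycl follow the same round trip: allocate device buffers, map them, evaluate the unary op on the SYCL device, copy the result back, and compare it against the CPU evaluation with VERIFY_IS_APPROX. A condensed sketch of that pattern as a generic helper; the helper name and the op callback are hypothetical, and the same includes as the test file (main.h for VERIFY_IS_APPROX plus the tensor header) are assumed:

template <typename DataType, int DataLayout, typename IndexType, typename UnaryOp>
void run_unary_on_device(const Eigen::SyclDevice& sycl_device,
                         const Eigen::array<IndexType, 3>& range, UnaryOp op) {
  Eigen::Tensor<DataType, 3, DataLayout, IndexType> in(range), out(range), out_cpu(range);
  in = in.random();

  DataType* d_in = static_cast<DataType*>(sycl_device.allocate(in.size() * sizeof(DataType)));
  DataType* d_out = static_cast<DataType*>(sycl_device.allocate(out.size() * sizeof(DataType)));
  Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout, IndexType>> gpu_in(d_in, range);
  Eigen::TensorMap<Eigen::Tensor<DataType, 3, DataLayout, IndexType>> gpu_out(d_out, range);

  sycl_device.memcpyHostToDevice(d_in, in.data(), in.size() * sizeof(DataType));
  gpu_out.device(sycl_device) = op(gpu_in);   // e.g. op = [](const auto& t) { return t.tanh(); }
  sycl_device.memcpyDeviceToHost(out.data(), d_out, out.size() * sizeof(DataType));

  out_cpu = op(in);
  for (IndexType i = 0; i < in.size(); ++i) {
    VERIFY_IS_APPROX(out(i), out_cpu(i));
  }
  sycl_device.deallocate(d_in);
  sycl_device.deallocate(d_out);
}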