cxx11_tensor_custom_op_sycl.cpp File Reference
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>

Classes

struct  InsertZeros< TensorType >
 
struct  BatchMatMul< TensorType >
 

Macros

#define EIGEN_TEST_NO_LONGDOUBLE
 
#define EIGEN_TEST_NO_COMPLEX
 
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE   int64_t
 
#define EIGEN_USE_SYCL
 

Functions

template<typename DataType , int DataLayout, typename IndexType >
static void test_custom_unary_op_sycl (const Eigen::SyclDevice &sycl_device)
 
template<typename DataType , int DataLayout, typename IndexType >
static void test_custom_binary_op_sycl (const Eigen::SyclDevice &sycl_device)
 
template<typename DataType , typename Dev_selector >
void custom_op_perDevice (Dev_selector s)
 
 EIGEN_DECLARE_TEST (cxx11_tensor_custom_op_sycl)
 

Macro Definition Documentation

◆ EIGEN_DEFAULT_DENSE_INDEX_TYPE

#define EIGEN_DEFAULT_DENSE_INDEX_TYPE   int64_t

◆ EIGEN_TEST_NO_COMPLEX

#define EIGEN_TEST_NO_COMPLEX

◆ EIGEN_TEST_NO_LONGDOUBLE

#define EIGEN_TEST_NO_LONGDOUBLE

◆ EIGEN_USE_SYCL

#define EIGEN_USE_SYCL

Function Documentation

◆ custom_op_perDevice()

template<typename DataType , typename Dev_selector >
void custom_op_perDevice ( Dev_selector  s)
158  {
159  QueueInterface queueInterface(s);
160  auto sycl_device = Eigen::SyclDevice(&queueInterface);
161  test_custom_unary_op_sycl<DataType, RowMajor, int64_t>(sycl_device);
162  test_custom_unary_op_sycl<DataType, ColMajor, int64_t>(sycl_device);
163  test_custom_binary_op_sycl<DataType, ColMajor, int64_t>(sycl_device);
164  test_custom_binary_op_sycl<DataType, RowMajor, int64_t>(sycl_device);
165 }
RealScalar s
Definition: level1_cplx_impl.h:130

References s.

◆ EIGEN_DECLARE_TEST()

EIGEN_DECLARE_TEST ( cxx11_tensor_custom_op_sycl  )
166  {
167  for (const auto& device : Eigen::get_sycl_supported_devices()) {
168  CALL_SUBTEST(custom_op_perDevice<float>(device));
169  }
170 }
#define CALL_SUBTEST(FUNC)
Definition: main.h:382

References CALL_SUBTEST.

◆ test_custom_binary_op_sycl()

template<typename DataType , int DataLayout, typename IndexType >
static void test_custom_binary_op_sycl ( const Eigen::SyclDevice &  sycl_device)
static
109  {
110  Eigen::array<IndexType, 3> tensorRange1 = {{2, 3, 5}};
111  Eigen::array<IndexType, 3> tensorRange2 = {{3, 7, 5}};
112  Eigen::array<IndexType, 3> tensorResultRange = {{2, 7, 5}};
113 
117 
118  DataType* gpu_in1_data =
119  static_cast<DataType*>(sycl_device.allocate(in1.dimensions().TotalSize() * sizeof(DataType)));
120  DataType* gpu_in2_data =
121  static_cast<DataType*>(sycl_device.allocate(in2.dimensions().TotalSize() * sizeof(DataType)));
122  DataType* gpu_out_data =
123  static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize() * sizeof(DataType)));
124 
126  TensorType gpu_in1(gpu_in1_data, tensorRange1);
127  TensorType gpu_in2(gpu_in2_data, tensorRange2);
128  TensorType gpu_out(gpu_out_data, tensorResultRange);
129 
130  in1.setRandom();
131  in2.setRandom();
132 
133  sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(), (in1.dimensions().TotalSize()) * sizeof(DataType));
134  sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(), (in2.dimensions().TotalSize()) * sizeof(DataType));
135 
136  gpu_out.device(sycl_device) = gpu_in1.customOp(gpu_in2, BatchMatMul<TensorType>());
137  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data, (out.dimensions().TotalSize()) * sizeof(DataType));
138 
139  for (IndexType i = 0; i < 5; ++i) {
141  array<DimPair, 1> dims;
142  dims[0] = DimPair(1, 0);
144  in1.template chip<2>(i).contract(in2.template chip<2>(i), dims);
146  for (IndexType j = 0; j < 2; ++j) {
147  for (IndexType k = 0; k < 7; ++k) {
148  VERIFY_IS_APPROX(val(j, k), reference(j, k));
149  }
150  }
151  }
152  sycl_device.deallocate(gpu_in1_data);
153  sycl_device.deallocate(gpu_in2_data);
154  sycl_device.deallocate(gpu_out_data);
155 }
int i
Definition: BiCGSTAB_step_by_step.cpp:9
A tensor expression mapping an existing array of data.
Definition: TensorMap.h:33
A reference to a tensor expression. The expression will be evaluated lazily (as much as possible).
Definition: TensorRef.h:114
The tensor class.
Definition: Tensor.h:68
Tensor< float, 1 >::DimensionPair DimPair
Definition: cxx11_tensor_contraction.cpp:17
#define VERIFY_IS_APPROX(a, b)
Definition: integer_types.cpp:13
char char char int int * k
Definition: level2_impl.h:374
std::array< T, N > array
Definition: EmulateArray.h:231
val
Definition: calibrate.py:119
Definition: cxx11_tensor_custom_op.cpp:57
std::ofstream out("Result.txt")
std::ptrdiff_t j
Definition: tut_arithmetic_redux_minmax.cpp:2

References Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::data(), Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::dimensions(), i, j, k, out(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), Eigen::DSizes< DenseIndex, NumDims >::TotalSize(), calibrate::val, and VERIFY_IS_APPROX.

◆ test_custom_unary_op_sycl()

template<typename DataType , int DataLayout, typename IndexType >
static void test_custom_unary_op_sycl ( const Eigen::SyclDevice &  sycl_device)
static
47  {
48  IndexType sizeDim1 = 3;
49  IndexType sizeDim2 = 5;
50  Eigen::array<IndexType, 2> tensorRange = {{sizeDim1, sizeDim2}};
51  Eigen::array<IndexType, 2> tensorResultRange = {{6, 10}};
52 
55 
56  DataType* gpu_in1_data =
57  static_cast<DataType*>(sycl_device.allocate(in1.dimensions().TotalSize() * sizeof(DataType)));
58  DataType* gpu_out_data =
59  static_cast<DataType*>(sycl_device.allocate(out.dimensions().TotalSize() * sizeof(DataType)));
60 
62  TensorType gpu_in1(gpu_in1_data, tensorRange);
63  TensorType gpu_out(gpu_out_data, tensorResultRange);
64 
65  in1.setRandom();
66  sycl_device.memcpyHostToDevice(gpu_in1_data, in1.data(), (in1.dimensions().TotalSize()) * sizeof(DataType));
67  gpu_out.device(sycl_device) = gpu_in1.customOp(InsertZeros<TensorType>());
68  sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data, (out.dimensions().TotalSize()) * sizeof(DataType));
69 
70  VERIFY_IS_EQUAL(out.dimension(0), 6);
71  VERIFY_IS_EQUAL(out.dimension(1), 10);
72 
73  for (int i = 0; i < 6; i += 2) {
74  for (int j = 0; j < 10; j += 2) {
75  VERIFY_IS_EQUAL(out(i, j), in1(i / 2, j / 2));
76  }
77  }
78  for (int i = 1; i < 6; i += 2) {
79  for (int j = 1; j < 10; j += 2) {
80  VERIFY_IS_EQUAL(out(i, j), 0);
81  }
82  }
83  sycl_device.deallocate(gpu_in1_data);
84  sycl_device.deallocate(gpu_out_data);
85 }
#define VERIFY_IS_EQUAL(a, b)
Definition: main.h:367
Definition: cxx11_tensor_custom_op.cpp:16

References Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::data(), Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::dimensions(), i, j, out(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), Eigen::DSizes< DenseIndex, NumDims >::TotalSize(), and VERIFY_IS_EQUAL.