cxx11_tensor_reduction.cpp File Reference
#include "main.h"
#include <limits>
#include <numeric>
#include <Eigen/CXX11/Tensor>

Classes

struct  UserReducer
 

Functions

template<int DataLayout>
static void test_trivial_reductions ()
 
template<typename Scalar , int DataLayout>
static void test_simple_reductions ()
 
template<int DataLayout>
static void test_reductions_in_expr ()
 
template<int DataLayout>
static void test_full_reductions ()
 
template<int DataLayout>
static void test_user_defined_reductions ()
 
template<int DataLayout>
static void test_tensor_maps ()
 
template<int DataLayout>
static void test_static_dims ()
 
template<int DataLayout>
static void test_innermost_last_dims ()
 
template<int DataLayout>
static void test_innermost_first_dims ()
 
template<int DataLayout>
static void test_reduce_middle_dims ()
 
template<typename ScalarType , int num_elements, int max_mean>
void test_sum_accuracy ()
 
 EIGEN_DECLARE_TEST (cxx11_tensor_reduction)
 

Function Documentation

◆ EIGEN_DECLARE_TEST()

EIGEN_DECLARE_TEST ( cxx11_tensor_reduction  )
{
  CALL_SUBTEST(test_trivial_reductions<ColMajor>());
  CALL_SUBTEST(test_trivial_reductions<RowMajor>());
  CALL_SUBTEST((test_simple_reductions<float, ColMajor>()));
  CALL_SUBTEST((test_simple_reductions<float, RowMajor>()));
  CALL_SUBTEST((test_simple_reductions<Eigen::half, ColMajor>()));
  CALL_SUBTEST((test_simple_reductions<Eigen::bfloat16, ColMajor>()));
  CALL_SUBTEST(test_reductions_in_expr<ColMajor>());
  CALL_SUBTEST(test_reductions_in_expr<RowMajor>());
  CALL_SUBTEST(test_full_reductions<ColMajor>());
  CALL_SUBTEST(test_full_reductions<RowMajor>());
  CALL_SUBTEST(test_user_defined_reductions<ColMajor>());
  CALL_SUBTEST(test_user_defined_reductions<RowMajor>());
  CALL_SUBTEST(test_tensor_maps<ColMajor>());
  CALL_SUBTEST(test_tensor_maps<RowMajor>());
  CALL_SUBTEST(test_static_dims<ColMajor>());
  CALL_SUBTEST(test_static_dims<RowMajor>());
  CALL_SUBTEST(test_innermost_last_dims<ColMajor>());
  CALL_SUBTEST(test_innermost_last_dims<RowMajor>());
  CALL_SUBTEST(test_innermost_first_dims<ColMajor>());
  CALL_SUBTEST(test_innermost_first_dims<RowMajor>());
  CALL_SUBTEST(test_reduce_middle_dims<ColMajor>());
  CALL_SUBTEST(test_reduce_middle_dims<RowMajor>());
  CALL_SUBTEST((test_sum_accuracy<float, 10 * 1024 * 1024, 8 * 1024>()));
  CALL_SUBTEST((test_sum_accuracy<Eigen::bfloat16, 10 * 1024 * 1024, 8 * 1024>()));
  // The range of half is limited to 65519 when using round-to-even,
  // so we are severely limited in the size and mean of the tensors
  // we can reduce without overflow.
  CALL_SUBTEST((test_sum_accuracy<Eigen::half, 4 * 1024, 16>()));
  CALL_SUBTEST((test_sum_accuracy<Eigen::half, 10 * 1024 * 1024, 0>()));
}

References CALL_SUBTEST.
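
A quick aside on the 65519 figure in the comment above (illustrative only, not part of the test): the largest finite binary16 value is 65504, and 65520 is the exact midpoint to the next representable step, so under round-to-nearest-even any float at or above 65520 converts to +inf.

  #include <Eigen/Core>
  #include <iostream>

  int main() {
    Eigen::half ok(65504.0f);        // largest finite half
    Eigen::half overflow(65520.0f);  // tie point; rounds to +inf
    std::cout << static_cast<float>(ok) << "\n";        // 65504
    std::cout << static_cast<float>(overflow) << "\n";  // inf
    return 0;
  }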

◆ test_full_reductions()

template<int DataLayout>
static void test_full_reductions ( )
static
{
  Tensor<float, 2, DataLayout> tensor(2, 3);
  tensor.setRandom();
  array<ptrdiff_t, 2> reduction_axis;
  reduction_axis[0] = 0;
  reduction_axis[1] = 1;

  Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
  VERIFY_IS_EQUAL(result.rank(), 0);

  float sum = 0.0f;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      sum += tensor(i, j);
    }
  }
  VERIFY_IS_APPROX(result(0), sum);

  result = tensor.square().sum(reduction_axis).sqrt();
  VERIFY_IS_EQUAL(result.rank(), 0);

  sum = 0.0f;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      sum += tensor(i, j) * tensor(i, j);
    }
  }
  VERIFY_IS_APPROX(result(), sqrtf(sum));
}

References i, j, Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::rank(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), VERIFY_IS_APPROX, and VERIFY_IS_EQUAL.
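
As the listing shows, a full reduction produces a rank-0 tensor whose single value is read with operator() and no indices. A minimal stand-alone sketch of the same pattern (names here are illustrative):

  #include <Eigen/CXX11/Tensor>
  #include <iostream>

  int main() {
    Eigen::Tensor<float, 2> t(2, 3);
    t.setRandom();
    // Omitting the axis list reduces over every dimension.
    Eigen::Tensor<float, 0> total = t.sum();
    std::cout << total() << "\n";  // the reduced scalar
    return 0;
  }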

◆ test_innermost_first_dims()

template<int DataLayout>
static void test_innermost_first_dims ( )
static
{
  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
  Tensor<float, 2, DataLayout> out(72, 53);
  in.setRandom();

  // Reduce on the innermost dimensions.
  // This triggers the use of packets for RowMajor.
  Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>> reduction_axis;

  out = in.maximum(reduction_axis);

  for (int i = 0; i < 72; ++i) {
    for (int j = 0; j < 53; ++j) {
      float expected = -1e10f;
      for (int k = 0; k < 97; ++k) {
        for (int l = 0; l < 113; ++l) {
          expected = (std::max)(expected, in(i, j, k, l));
        }
      }
      VERIFY_IS_EQUAL(out(i, j), expected);
    }
  }
}

References i, j, k, max, out(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), and VERIFY_IS_EQUAL.

◆ test_innermost_last_dims()

template<int DataLayout>
static void test_innermost_last_dims ( )
static
{
  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
  Tensor<float, 2, DataLayout> out(97, 113);
  in.setRandom();

  // Reduce on the innermost dimensions.
  // This triggers the use of packets for ColMajor.
  Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1>> reduction_axis;

  out = in.maximum(reduction_axis);

  for (int i = 0; i < 97; ++i) {
    for (int j = 0; j < 113; ++j) {
      float expected = -1e10f;
      for (int k = 0; k < 53; ++k) {
        for (int l = 0; l < 72; ++l) {
          expected = (std::max)(expected, in(l, k, i, j));
        }
      }
      VERIFY_IS_EQUAL(out(i, j), expected);
    }
  }
}

References i, j, k, max, out(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), and VERIFY_IS_EQUAL.

◆ test_reduce_middle_dims()

template<int DataLayout>
static void test_reduce_middle_dims ( )
static
{
  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
  Tensor<float, 2, DataLayout> out(72, 113);
  in.setRandom();

  // Reduce on the middle dimensions, which are not innermost
  // for either layout.
  Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>> reduction_axis;

  out = in.maximum(reduction_axis);

  for (int i = 0; i < 72; ++i) {
    for (int j = 0; j < 113; ++j) {
      float expected = -1e10f;
      for (int k = 0; k < 53; ++k) {
        for (int l = 0; l < 97; ++l) {
          expected = (std::max)(expected, in(i, k, l, j));
        }
      }
      VERIFY_IS_EQUAL(out(i, j), expected);
    }
  }
}

References i, j, k, max, out(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), and VERIFY_IS_EQUAL.

◆ test_reductions_in_expr()

template<int DataLayout>
static void test_reductions_in_expr ( )
static
{
  Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
  tensor.setRandom();
  array<ptrdiff_t, 2> reduction_axis2;
  reduction_axis2[0] = 1;
  reduction_axis2[1] = 3;

  Tensor<float, 2, DataLayout> result(2, 5);
  result = result.constant(1.0f) - tensor.sum(reduction_axis2);
  VERIFY_IS_EQUAL(result.dimension(0), 2);
  VERIFY_IS_EQUAL(result.dimension(1), 5);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 5; ++j) {
      float sum = 0.0f;
      for (int k = 0; k < 3; ++k) {
        for (int l = 0; l < 7; ++l) {
          sum += tensor(i, k, j, l);
        }
      }
      VERIFY_IS_APPROX(result(i, j), 1.0f - sum);
    }
  }
}

References Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::dimension(), i, j, k, Eigen::TensorBase< Derived, AccessLevel >::setRandom(), VERIFY_IS_APPROX, and VERIFY_IS_EQUAL.
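
The point exercised here is that a reduction is itself a lazy tensor expression, so it composes with other operations before assignment triggers evaluation. A compact stand-alone version of the pattern (tensor names are ours):

  #include <Eigen/CXX11/Tensor>
  #include <iostream>

  int main() {
    Eigen::Tensor<float, 2> t(2, 3);
    t.setRandom();
    Eigen::array<ptrdiff_t, 1> axis = {1};
    Eigen::Tensor<float, 1> r(2);
    // The sum node is subtracted from a constant inside one expression.
    r = r.constant(1.0f) - t.sum(axis);
    std::cout << r(0) << " " << r(1) << "\n";
    return 0;
  }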

◆ test_simple_reductions()

template<typename Scalar , int DataLayout>
static void test_simple_reductions ( )
static
{
  Tensor<Scalar, 4, DataLayout> tensor(2, 3, 5, 7);
  tensor.setRandom();
  // Add a little offset so that the product reductions won't be close to zero.
  tensor += tensor.constant(Scalar(0.5f));
  array<ptrdiff_t, 2> reduction_axis2;
  reduction_axis2[0] = 1;
  reduction_axis2[1] = 3;

  Tensor<Scalar, 2, DataLayout> result = tensor.sum(reduction_axis2);
  VERIFY_IS_EQUAL(result.dimension(0), 2);
  VERIFY_IS_EQUAL(result.dimension(1), 5);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 5; ++j) {
      Scalar sum = Scalar(0.0f);
      for (int k = 0; k < 3; ++k) {
        for (int l = 0; l < 7; ++l) {
          sum += tensor(i, k, j, l);
        }
      }
      VERIFY_IS_APPROX(result(i, j), sum);
    }
  }

  {
    Tensor<Scalar, 0, DataLayout> sum1 = tensor.sum();
    VERIFY_IS_EQUAL(sum1.rank(), 0);

    array<ptrdiff_t, 4> reduction_axis4;
    reduction_axis4[0] = 0;
    reduction_axis4[1] = 1;
    reduction_axis4[2] = 2;
    reduction_axis4[3] = 3;
    Tensor<Scalar, 0, DataLayout> sum2 = tensor.sum(reduction_axis4);
    VERIFY_IS_EQUAL(sum2.rank(), 0);

    VERIFY_IS_APPROX(sum1(), sum2());
  }

  reduction_axis2[0] = 0;
  reduction_axis2[1] = 2;
  result = tensor.prod(reduction_axis2);
  VERIFY_IS_EQUAL(result.dimension(0), 3);
  VERIFY_IS_EQUAL(result.dimension(1), 7);
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 7; ++j) {
      Scalar prod = Scalar(1.0f);
      for (int k = 0; k < 2; ++k) {
        for (int l = 0; l < 5; ++l) {
          prod *= tensor(k, i, l, j);
        }
      }
      VERIFY_IS_APPROX(result(i, j), prod);
    }
  }

  {
    Tensor<Scalar, 0, DataLayout> prod1 = tensor.prod();
    VERIFY_IS_EQUAL(prod1.rank(), 0);

    array<ptrdiff_t, 4> reduction_axis4;
    reduction_axis4[0] = 0;
    reduction_axis4[1] = 1;
    reduction_axis4[2] = 2;
    reduction_axis4[3] = 3;
    Tensor<Scalar, 0, DataLayout> prod2 = tensor.prod(reduction_axis4);
    VERIFY_IS_EQUAL(prod2.rank(), 0);

    VERIFY_IS_APPROX(prod1(), prod2());
  }

  reduction_axis2[0] = 0;
  reduction_axis2[1] = 2;
  result = tensor.maximum(reduction_axis2);
  VERIFY_IS_EQUAL(result.dimension(0), 3);
  VERIFY_IS_EQUAL(result.dimension(1), 7);
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 7; ++j) {
      Scalar max_val = std::numeric_limits<Scalar>::lowest();
      for (int k = 0; k < 2; ++k) {
        for (int l = 0; l < 5; ++l) {
          max_val = (std::max)(max_val, tensor(k, i, l, j));
        }
      }
      VERIFY_IS_APPROX(result(i, j), max_val);
    }
  }

  {
    Tensor<Scalar, 0, DataLayout> max1 = tensor.maximum();
    VERIFY_IS_EQUAL(max1.rank(), 0);

    array<ptrdiff_t, 4> reduction_axis4;
    reduction_axis4[0] = 0;
    reduction_axis4[1] = 1;
    reduction_axis4[2] = 2;
    reduction_axis4[3] = 3;
    Tensor<Scalar, 0, DataLayout> max2 = tensor.maximum(reduction_axis4);
    VERIFY_IS_EQUAL(max2.rank(), 0);

    VERIFY_IS_APPROX(max1(), max2());
  }

  reduction_axis2[0] = 0;
  reduction_axis2[1] = 1;
  result = tensor.minimum(reduction_axis2);
  VERIFY_IS_EQUAL(result.dimension(0), 5);
  VERIFY_IS_EQUAL(result.dimension(1), 7);
  for (int i = 0; i < 5; ++i) {
    for (int j = 0; j < 7; ++j) {
      Scalar min_val = (std::numeric_limits<Scalar>::max)();
      for (int k = 0; k < 2; ++k) {
        for (int l = 0; l < 3; ++l) {
          min_val = (std::min)(min_val, tensor(k, l, i, j));
        }
      }
      VERIFY_IS_APPROX(result(i, j), min_val);
    }
  }

  {
    Tensor<Scalar, 0, DataLayout> min1 = tensor.minimum();
    VERIFY_IS_EQUAL(min1.rank(), 0);

    array<ptrdiff_t, 4> reduction_axis4;
    reduction_axis4[0] = 0;
    reduction_axis4[1] = 1;
    reduction_axis4[2] = 2;
    reduction_axis4[3] = 3;
    Tensor<Scalar, 0, DataLayout> min2 = tensor.minimum(reduction_axis4);
    VERIFY_IS_EQUAL(min2.rank(), 0);

    VERIFY_IS_APPROX(min1(), min2());
  }

  reduction_axis2[0] = 0;
  reduction_axis2[1] = 1;
  result = tensor.mean(reduction_axis2);
  VERIFY_IS_EQUAL(result.dimension(0), 5);
  VERIFY_IS_EQUAL(result.dimension(1), 7);
  for (int i = 0; i < 5; ++i) {
    for (int j = 0; j < 7; ++j) {
      Scalar sum = Scalar(0.0f);
      int count = 0;
      for (int k = 0; k < 2; ++k) {
        for (int l = 0; l < 3; ++l) {
          sum += tensor(k, l, i, j);
          ++count;
        }
      }
      VERIFY_IS_APPROX(result(i, j), sum / Scalar(count));
    }
  }

  {
    Tensor<Scalar, 0, DataLayout> mean1 = tensor.mean();
    VERIFY_IS_EQUAL(mean1.rank(), 0);

    array<ptrdiff_t, 4> reduction_axis4;
    reduction_axis4[0] = 0;
    reduction_axis4[1] = 1;
    reduction_axis4[2] = 2;
    reduction_axis4[3] = 3;
    Tensor<Scalar, 0, DataLayout> mean2 = tensor.mean(reduction_axis4);
    VERIFY_IS_EQUAL(mean2.rank(), 0);

    VERIFY_IS_APPROX(mean1(), mean2());
  }

  {
    Tensor<int, 1> ints(10);
    std::iota(ints.data(), ints.data() + ints.dimension(0), 0);

    TensorFixedSize<bool, Sizes<>> all_;
    all_ = ints.all();
    VERIFY(!all_());
    all_ = (ints >= ints.constant(0)).all();
    VERIFY(all_());

    TensorFixedSize<bool, Sizes<>> any;
    any = (ints > ints.constant(10)).any();
    VERIFY(!any());
    any = (ints < ints.constant(1)).any();
    VERIFY(any());
  }
}

References Eigen::placeholders::all, Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::data(), Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::dimension(), i, j, k, max, min, prod(), Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::rank(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), VERIFY, VERIFY_IS_APPROX, and VERIFY_IS_EQUAL.
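
Note how the boolean reductions at the end are handled: all() and any() yield a rank-0 boolean expression, which the test assigns to a TensorFixedSize<bool, Sizes<>>. A plain rank-0 Tensor works just as well; a small sketch (variable names are ours):

  #include <Eigen/CXX11/Tensor>
  #include <iostream>
  #include <numeric>

  int main() {
    Eigen::Tensor<int, 1> ints(10);
    std::iota(ints.data(), ints.data() + 10, 0);  // 0, 1, ..., 9
    Eigen::Tensor<bool, 0> all_nonneg = (ints >= ints.constant(0)).all();
    Eigen::Tensor<bool, 0> any_zero = (ints < ints.constant(1)).any();
    std::cout << all_nonneg() << " " << any_zero() << "\n";  // 1 1
    return 0;
  }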

◆ test_static_dims()

template<int DataLayout>
static void test_static_dims ( )
static
{
  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
  Tensor<float, 2, DataLayout> out(72, 97);
  in.setRandom();

  Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<3>> reduction_axis;

  out = in.maximum(reduction_axis);

  for (int i = 0; i < 72; ++i) {
    for (int j = 0; j < 97; ++j) {
      float expected = -1e10f;
      for (int k = 0; k < 53; ++k) {
        for (int l = 0; l < 113; ++l) {
          expected = (std::max)(expected, in(i, k, j, l));
        }
      }
      VERIFY_IS_EQUAL(out(i, j), expected);
    }
  }
}

References i, j, k, max, out(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), and VERIFY_IS_EQUAL.
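
Here, as in test_innermost_first_dims, test_innermost_last_dims, and test_reduce_middle_dims above, the reduction axes are encoded in the type via Eigen::IndexList and Eigen::type2index, so they are compile-time constants rather than runtime values. A stand-alone sketch of the idiom (sizes are illustrative):

  #include <Eigen/CXX11/Tensor>
  #include <iostream>

  int main() {
    Eigen::Tensor<float, 4> in(8, 4, 6, 5);
    in.setRandom();
    // Axes 1 and 3 are fixed in the type; no runtime storage needed.
    Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<3>> axes;
    Eigen::Tensor<float, 2> out = in.maximum(axes);
    std::cout << out.dimension(0) << " x " << out.dimension(1) << "\n";  // 8 x 6
    return 0;
  }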

◆ test_sum_accuracy()

template<typename ScalarType , int num_elements, int max_mean>
void test_sum_accuracy ( )
{
  Tensor<double, 1> double_tensor(num_elements);
  Tensor<ScalarType, 1> tensor(num_elements);
  for (double prescribed_mean = 0; prescribed_mean <= max_mean;
       prescribed_mean = numext::maxi(1.0, prescribed_mean * 3.99)) {
    // FIXME: NormalRandomGenerator doesn't work in bfloat and half.
    double_tensor.setRandom<Eigen::internal::NormalRandomGenerator<double>>();
    double_tensor += double_tensor.constant(prescribed_mean);
    tensor = double_tensor.cast<ScalarType>();

    Tensor<ScalarType, 0> sum;
    sum = tensor.sum();

    // Compute the reference value in double precision.
    double expected_sum = 0.0;
    double abs_sum = 0.0;
    for (int i = 0; i < num_elements; ++i) {
      expected_sum += static_cast<double>(tensor(i));
      abs_sum += static_cast<double>(numext::abs(tensor(i)));
    }
    // Test against probabilistic forward error bound. In reality, the error is much smaller
    // when we use tree summation.
    double err = Eigen::numext::abs(static_cast<double>(sum()) - expected_sum);
    double tol = numext::sqrt(static_cast<double>(num_elements)) *
                 static_cast<double>(NumTraits<ScalarType>::epsilon()) * abs_sum;
    VERIFY_LE(err, tol);
  }
}

References abs(), Eigen::numext::abs(), oomph::SarahBL::epsilon, i, Eigen::numext::maxi(), Eigen::TensorBase< Derived, AccessLevel >::setRandom(), sqrt(), and VERIFY_LE.
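
The tolerance computed in the loop encodes the standard probabilistic forward error bound for summation. Writing s for the exact sum, \hat{s} for the computed one, n for num_elements, and \varepsilon for NumTraits<ScalarType>::epsilon() (the notation is ours, not from the test), the check asserts

  |\hat{s} - s| \le \sqrt{n}\,\varepsilon \sum_{i=1}^{n} |x_i|

The deterministic worst case for recursive summation grows like n\varepsilon; the \sqrt{n} factor comes from treating the rounding errors as independent, and tree summation does better still, which is why the code comment expects the observed error to be much smaller.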

◆ test_tensor_maps()

template<int DataLayout>
static void test_tensor_maps ( )
static
{
  int inputs[2 * 3 * 5 * 7];
  TensorMap<Tensor<int, 4, DataLayout>> tensor_map(inputs, 2, 3, 5, 7);
  TensorMap<Tensor<const int, 4, DataLayout>> tensor_map_const(inputs, 2, 3, 5, 7);
  const TensorMap<Tensor<const int, 4, DataLayout>> tensor_map_const_const(inputs, 2, 3, 5, 7);

  tensor_map.setRandom();
  array<ptrdiff_t, 2> reduction_axis;
  reduction_axis[0] = 1;
  reduction_axis[1] = 3;

  Tensor<int, 2, DataLayout> result = tensor_map.sum(reduction_axis);
  Tensor<int, 2, DataLayout> result2 = tensor_map_const.sum(reduction_axis);
  Tensor<int, 2, DataLayout> result3 = tensor_map_const_const.sum(reduction_axis);

  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 5; ++j) {
      int sum = 0;
      for (int k = 0; k < 3; ++k) {
        for (int l = 0; l < 7; ++l) {
          sum += tensor_map(i, k, j, l);
        }
      }
      VERIFY_IS_EQUAL(result(i, j), sum);
      VERIFY_IS_EQUAL(result2(i, j), sum);
      VERIFY_IS_EQUAL(result3(i, j), sum);
    }
  }
}

References i, j, k, Eigen::TensorBase< Derived, AccessLevel >::setRandom(), and VERIFY_IS_EQUAL.
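
Because TensorMap wraps memory it does not own, the same reductions run directly on an existing buffer, and const maps behave identically (which is what the three result tensors verify above). A minimal sketch (buffer contents are illustrative):

  #include <Eigen/CXX11/Tensor>
  #include <iostream>

  int main() {
    float data[6] = {1, 2, 3, 4, 5, 6};  // raw storage, not owned by Eigen
    Eigen::TensorMap<Eigen::Tensor<float, 2>> m(data, 2, 3);
    Eigen::Tensor<float, 0> total = m.sum();
    std::cout << total() << "\n";  // 21
    return 0;
  }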

◆ test_trivial_reductions()

template<int DataLayout>
static void test_trivial_reductions ( )
static
{
  {
    Tensor<float, 0, DataLayout> tensor;
    tensor.setRandom();
    array<ptrdiff_t, 0> reduction_axis;

    Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
    VERIFY_IS_EQUAL(result(), tensor());
  }

  {
    Tensor<float, 1, DataLayout> tensor(7);
    tensor.setRandom();
    array<ptrdiff_t, 0> reduction_axis;

    Tensor<float, 1, DataLayout> result = tensor.sum(reduction_axis);
    VERIFY_IS_EQUAL(result.dimension(0), 7);
    for (int i = 0; i < 7; ++i) {
      VERIFY_IS_EQUAL(result(i), tensor(i));
    }
  }

  {
    Tensor<float, 2, DataLayout> tensor(2, 3);
    tensor.setRandom();
    array<ptrdiff_t, 0> reduction_axis;

    Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis);
    VERIFY_IS_EQUAL(result.dimension(0), 2);
    VERIFY_IS_EQUAL(result.dimension(1), 3);
    for (int i = 0; i < 2; ++i) {
      for (int j = 0; j < 3; ++j) {
        VERIFY_IS_EQUAL(result(i, j), tensor(i, j));
      }
    }
  }
}

References Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::dimension(), i, j, Eigen::TensorBase< Derived, AccessLevel >::setRandom(), and VERIFY_IS_EQUAL.

◆ test_user_defined_reductions()

template<int DataLayout>
static void test_user_defined_reductions ( )
static
{
  Tensor<float, 2, DataLayout> tensor(5, 7);
  tensor.setRandom();
  array<ptrdiff_t, 1> reduction_axis;
  reduction_axis[0] = 1;

  UserReducer reducer(10.0f);
  Tensor<float, 1, DataLayout> result = tensor.reduce(reduction_axis, reducer);
  VERIFY_IS_EQUAL(result.dimension(0), 5);
  for (int i = 0; i < 5; ++i) {
    float expected = 10.0f;
    for (int j = 0; j < 7; ++j) {
      expected += tensor(i, j) * tensor(i, j);
    }
    expected = 1.0f / expected;
    VERIFY_IS_APPROX(result(i), expected);
  }
}

References Eigen::Tensor< Scalar_, NumIndices_, Options_, IndexType_ >::dimension(), i, j, Eigen::TensorBase< Derived, AccessLevel >::setRandom(), VERIFY_IS_APPROX, and VERIFY_IS_EQUAL.
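
UserReducer itself is defined near the top of the test file (at cxx11_tensor_reduction.cpp:300) and is not reproduced in this documentation. A sketch consistent with the values verified above — initialize to zero, accumulate squared elements, and finalize as the reciprocal of the accumulator plus the constructor offset — following the reducer interface expected by reduce():

  struct UserReducer {
    static const bool PacketAccess = false;
    UserReducer(float offset) : offset_(offset) {}
    // Called once per reduced element: accumulate the square.
    void reduce(const float val, float* accum) { *accum += val * val; }
    // Starting value for each output coefficient.
    float initialize() const { return 0; }
    // Applied once at the end: 1 / (sum of squares + offset).
    float finalize(const float accum) const { return 1.0f / (accum + offset_); }

   private:
    const float offset_;
  };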