#ifndef THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_
#define THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_

typedef int TensorIndex;
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int

#include "unsupported/Eigen/CXX11/Tensor"
#include "benchmark.h"

#define BENCHMARK_RANGE(bench, lo, hi) BENCHMARK(bench)->Range(lo, hi)

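// Example registration (a sketch, not part of this header): a benchmark
// binary typically wraps a BenchmarkSuite method in a free function taking
// (iterations, problem size) and registers it with BENCHMARK_RANGE. The
// device type, scalar type, and size range below are illustrative
// assumptions:
//
//   static void BM_memcpy(int iters, int N) {
//     StopBenchmarkTiming();  // exclude setup from the timed region
//     Eigen::DefaultDevice device;
//     BenchmarkSuite<Eigen::DefaultDevice, float> suite(device, N);
//     suite.memcpy(iters);
//   }
//   BENCHMARK_RANGE(BM_memcpy, 10, 5000);
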
using Eigen::Tensor;
using Eigen::TensorMap;

// TODO(bsteiner): also templatize on the input type since we have users
// for int8 as well as floats.
template <typename Device, typename T>
class BenchmarkSuite {
 public:
  BenchmarkSuite(const Device& device, size_t m, size_t k, size_t n) : m_(m), k_(k), n_(n), device_(device) {
    initialize();
  }

  BenchmarkSuite(const Device& device, size_t m) : m_(m), k_(m), n_(m), device_(device) { initialize(); }

  BenchmarkSuite(const Device& device, size_t m, size_t k) : m_(1), k_(k), n_(m), device_(device) { initialize(); }

  ~BenchmarkSuite() {
    device_.deallocate(a_);
    device_.deallocate(b_);
    device_.deallocate(c_);
  }

  void memcpy(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      device_.memcpy(c_, a_, m_ * m_ * sizeof(T));
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      device_.memcpy(c_, a_, m_ * m_ * sizeof(T));
    }
    // Record the number of values copied per second
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  void typeCasting(int num_iters) {
    eigen_assert(m_ == n_);
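    // The source tensor is viewed as int data; when T is narrower than int,
    // the extents are scaled down so the int-typed view does not overrun the
    // T-typed buffer it aliases.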
    Eigen::array<TensorIndex, 2> sizes;
    if (sizeof(T) >= sizeof(int)) {
      sizes[0] = m_;
      sizes[1] = k_;
    } else {
      sizes[0] = m_ * sizeof(T) / sizeof(int);
      sizes[1] = k_ * sizeof(T) / sizeof(int);
    }
    const TensorMap<Tensor<int, 2>, Eigen::Aligned> A((int*)a_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      B.device(device_) = A.template cast<T>();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      B.device(device_) = A.template cast<T>();
    }
    // Record the number of values copied per second
    finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
  }

  void random(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = C.random();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = C.random();
    }
    // Record the number of random numbers generated per second
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  void slicing(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);

    const Eigen::DSizes<TensorIndex, 2> quarter_sizes(m_ / 2, m_ / 2);
    const Eigen::DSizes<TensorIndex, 2> first_quadrant(0, 0);
    const Eigen::DSizes<TensorIndex, 2> second_quadrant(0, m_ / 2);
    const Eigen::DSizes<TensorIndex, 2> third_quadrant(m_ / 2, 0);
    const Eigen::DSizes<TensorIndex, 2> fourth_quadrant(m_ / 2, m_ / 2);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.slice(first_quadrant, quarter_sizes).device(device_) = A.slice(first_quadrant, quarter_sizes);
      C.slice(second_quadrant, quarter_sizes).device(device_) = B.slice(second_quadrant, quarter_sizes);
      C.slice(third_quadrant, quarter_sizes).device(device_) = A.slice(third_quadrant, quarter_sizes);
      C.slice(fourth_quadrant, quarter_sizes).device(device_) = B.slice(fourth_quadrant, quarter_sizes);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.slice(first_quadrant, quarter_sizes).device(device_) = A.slice(first_quadrant, quarter_sizes);
      C.slice(second_quadrant, quarter_sizes).device(device_) = B.slice(second_quadrant, quarter_sizes);
      C.slice(third_quadrant, quarter_sizes).device(device_) = A.slice(third_quadrant, quarter_sizes);
      C.slice(fourth_quadrant, quarter_sizes).device(device_) = B.slice(fourth_quadrant, quarter_sizes);
    }
    // Record the number of values copied from the rhs slice to the lhs slice
    // each second
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  void rowChip(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, input_size);
    Eigen::array<TensorIndex, 1> output_size;
    output_size[0] = n_;
    TensorMap<Tensor<T, 1>, Eigen::Aligned> C(c_, output_size);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = B.chip(iter % k_, 0);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = B.chip(iter % k_, 0);
    }
    // Record the number of values copied from the rhs chip to the lhs.
    finalizeBenchmark(static_cast<int64_t>(n_) * num_iters);
  }

  void colChip(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, input_size);
    Eigen::array<TensorIndex, 1> output_size;
    output_size[0] = n_;
    TensorMap<Tensor<T, 1>, Eigen::Aligned> C(c_, output_size);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = B.chip(iter % n_, 1);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = B.chip(iter % n_, 1);
    }
    // Record the number of values copied from the rhs chip to the lhs.
    finalizeBenchmark(static_cast<int64_t>(n_) * num_iters);
  }

  void shuffling(int num_iters) {
    eigen_assert(m_ == n_);
    Eigen::array<TensorIndex, 2> size_a;
    size_a[0] = m_;
    size_a[1] = k_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
    Eigen::array<TensorIndex, 2> size_b;
    size_b[0] = k_;
    size_b[1] = m_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);

    Eigen::array<int, 2> shuffle;
    shuffle[0] = 1;
    shuffle[1] = 0;
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      B.device(device_) = A.shuffle(shuffle);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      B.device(device_) = A.shuffle(shuffle);
    }
    // Record the number of values shuffled from A and copied to B each second
    finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
  }

  void padding(int num_iters) {
    eigen_assert(m_ == k_);
    Eigen::array<TensorIndex, 2> size_a;
    size_a[0] = m_;
    size_a[1] = k_ - 3;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
    Eigen::array<TensorIndex, 2> size_b;
    size_b[0] = k_;
    size_b[1] = m_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);

    Eigen::IndexPairList<Eigen::type2indexpair<0, 0>, Eigen::type2indexpair<2, 1>> paddings;
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      B.device(device_) = A.pad(paddings);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      B.device(device_) = A.pad(paddings);
    }
    // Record the number of values copied from the padded tensor A each second
    finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
  }

  void striding(int num_iters) {
    eigen_assert(m_ == k_);
    Eigen::array<TensorIndex, 2> size_a;
    size_a[0] = m_;
    size_a[1] = k_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
    Eigen::array<TensorIndex, 2> size_b;
    size_b[0] = m_;
    size_b[1] = k_ / 2;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, size_b);

    Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>> strides;

#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      B.device(device_) = A.stride(strides);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      B.device(device_) = A.stride(strides);
    }
    // Record the number of values copied from the strided tensor A each second
    finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
  }

  void broadcasting(int num_iters) {
    Eigen::array<TensorIndex, 2> size_a;
    size_a[0] = m_;
    size_a[1] = 1;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, size_a);
    Eigen::array<TensorIndex, 2> size_c;
    size_c[0] = m_;
    size_c[1] = n_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, size_c);
    Eigen::IndexList<Eigen::type2index<1>, int> broadcast;
    broadcast.set(1, n_);
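    // The broadcast factor along dimension 0 is the compile-time constant 1;
    // only the runtime factor along dimension 1 needs to be set.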

#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.broadcast(broadcast);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.broadcast(broadcast);
    }
    // Record the number of values broadcast from A and copied to C each second
    finalizeBenchmark(static_cast<int64_t>(m_) * n_ * num_iters);
  }

  void coeffWiseOp(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A * A.constant(static_cast<T>(3.14)) + B * B.constant(static_cast<T>(2.7));
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A * A.constant(static_cast<T>(3.14)) + B * B.constant(static_cast<T>(2.7));
    }
    // Record the number of FLOPs executed per second (2 multiplications and
    // 1 addition per value)
    finalizeBenchmark(static_cast<int64_t>(3) * m_ * m_ * num_iters);
  }

  void algebraicFunc(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);

#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
    }
    // Record the number of FLOPs executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  void transcendentalFunc(int num_iters) {
    eigen_assert(m_ == k_ && k_ == n_);
    Eigen::array<TensorIndex, 2> sizes;
    sizes[0] = m_;
    sizes[1] = m_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes);
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.exp() + B.log();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.exp() + B.log();
    }
    // Record the number of FLOPs executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
  }

  // Row reduction
  void rowReduction(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, input_size);
    Eigen::array<TensorIndex, 1> output_size;
    output_size[0] = n_;
    TensorMap<Tensor<T, 1>, Eigen::Aligned> C(c_, output_size);
    Eigen::IndexList<Eigen::type2index<0>> sum_along_dim;
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = B.sum(sum_along_dim);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = B.sum(sum_along_dim);
    }
    // Record the number of FLOPs executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
  }

  // Column reduction
  void colReduction(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, input_size);
    Eigen::array<TensorIndex, 1> output_size;
    output_size[0] = k_;
    TensorMap<Tensor<T, 1>, Eigen::Aligned> A(a_, output_size);

#ifndef EIGEN_HAS_INDEX_LIST
    Eigen::array<TensorIndex, 1> sum_along_dim;
    sum_along_dim[0] = 1;
#else
    // Take advantage of cxx11 to give the compiler information it can use to
    // optimize the code.
    Eigen::IndexList<Eigen::type2index<1>> sum_along_dim;
#endif
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      A.device(device_) = B.sum(sum_along_dim);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      A.device(device_) = B.sum(sum_along_dim);
    }
    // Record the number of FLOPs executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
  }

  // Full reduction
  void fullReduction(int num_iters) {
    Eigen::array<TensorIndex, 2> input_size;
    input_size[0] = k_;
    input_size[1] = n_;
    const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, input_size);
    Eigen::array<TensorIndex, 0> output_size;
    TensorMap<Tensor<T, 0>, Eigen::Aligned> C(c_, output_size);
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = B.sum();
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = B.sum();
    }
    // Record the number of FLOPs executed per second (assuming one operation
    // per value)
    finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
  }

  // do a contraction which is equivalent to a matrix multiplication
  void contraction(int num_iters) { contraction<static_cast<int>(Eigen::ColMajor)>(num_iters, false, false); }

  void contractionRowMajor(int num_iters) { contraction<static_cast<int>(Eigen::RowMajor)>(num_iters, false, false); }

  void contractionRowMajorAT(int num_iters) { contraction<static_cast<int>(Eigen::RowMajor)>(num_iters, true, false); }

  void contractionRowMajorBT(int num_iters) { contraction<static_cast<int>(Eigen::RowMajor)>(num_iters, false, true); }

  void contractionRowMajorABT(int num_iters) { contraction<static_cast<int>(Eigen::RowMajor)>(num_iters, true, true); }
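
  // In the names above, "AT" and "BT" indicate that the A or B operand is
  // stored transposed, exercising the trans_a/trans_b paths of the private
  // contraction helper.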

  void convolution(int num_iters, int kernel_x, int kernel_y) {
    Eigen::array<TensorIndex, 2> input_sizes;
    input_sizes[0] = m_;
    input_sizes[1] = n_;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, input_sizes);
    Eigen::array<TensorIndex, 2> kernel_sizes;
    kernel_sizes[0] = kernel_x;
    kernel_sizes[1] = kernel_y;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, kernel_sizes);
    Eigen::array<TensorIndex, 2> result_sizes;
    result_sizes[0] = m_ - kernel_x + 1;
    result_sizes[1] = n_ - kernel_y + 1;
    TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, result_sizes);
    Eigen::array<TensorIndex, 2> dims;
    dims[0] = 0;
    dims[1] = 1;
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.convolve(B, dims);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.convolve(B, dims);
    }
    // Record the number of FLOPs executed per second (kernel_size
    // multiplications and additions for each value in the resulting tensor)
    finalizeBenchmark(static_cast<int64_t>(2) * (m_ - kernel_x + 1) * (n_ - kernel_y + 1) * kernel_x * kernel_y *
                      num_iters);
  }

 private:
  // do a contraction which is equivalent to a matrix multiplication
  template <int Layout>
  void contraction(int num_iters, bool trans_a, bool trans_b) {
    Eigen::array<TensorIndex, 2> sizeA;
    sizeA[0] = (trans_a ? k_ : m_);
    sizeA[1] = (trans_a ? m_ : k_);
    Eigen::array<TensorIndex, 2> sizeB;
    sizeB[0] = (trans_b ? n_ : k_);
    sizeB[1] = (trans_b ? k_ : n_);
    Eigen::array<TensorIndex, 2> sizeC;
    sizeC[0] = m_;
    sizeC[1] = n_;

    const TensorMap<Tensor<T, 2, Layout>, Eigen::Aligned> A(a_, sizeA);
    const TensorMap<Tensor<T, 2, Layout>, Eigen::Aligned> B(b_, sizeB);
    TensorMap<Tensor<T, 2, Layout>, Eigen::Aligned> C(c_, sizeC);

    typedef typename Tensor<T, 2, Layout>::DimensionPair DimPair;
    Eigen::array<DimPair, 1> dims;
    TensorIndex a_contract_dim = (trans_a ? 0 : 1);
    TensorIndex b_contract_dim = (trans_b ? 1 : 0);
    dims[0] = DimPair(a_contract_dim, b_contract_dim);
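    // E.g. with trans_a == trans_b == false this contracts dimension 1 of A
    // (length k_) against dimension 0 of B (also length k_), i.e. C = A * B.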
#ifdef EIGEN_USE_SYCL // warmup for sycl
    for (int iter = 0; iter < 10; ++iter) {
      C.device(device_) = A.contract(B, dims);
    }
#endif
    StartBenchmarkTiming();
    for (int iter = 0; iter < num_iters; ++iter) {
      C.device(device_) = A.contract(B, dims);
    }
    // Record the number of FLOPs executed per second (k_ multiplications and
    // k_ additions for each value in the resulting tensor)
    finalizeBenchmark(static_cast<int64_t>(2) * m_ * n_ * k_ * num_iters);
  }

  void initialize() {
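    // The pools are sized like a GEMM: a_ holds m_ * k_ values, b_ holds
    // k_ * n_, and c_ holds m_ * n_; individual benchmarks remap them with
    // whatever shapes they need.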
    a_ = (T*)device_.allocate(m_ * k_ * sizeof(T));
    b_ = (T*)device_.allocate(k_ * n_ * sizeof(T));
    c_ = (T*)device_.allocate(m_ * n_ * sizeof(T));

    // Initialize the content of the memory pools to prevent asan from
    // complaining.
    device_.fill(a_, a_ + m_ * k_, T(12));
    device_.fill(b_, b_ + k_ * n_, T(23));
    device_.fill(c_, c_ + m_ * n_, T(31));
  }

  inline void finalizeBenchmark(int64_t num_items) {
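    // GPU and SYCL expressions execute asynchronously, so synchronize the
    // device before stopping the timer to make sure all pending work is
    // accounted for in the measurement.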
#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
    if (Eigen::internal::is_same<Device, Eigen::GpuDevice>::value) {
      device_.synchronize();
    }
#elif defined(EIGEN_USE_SYCL)
    if (Eigen::internal::is_same<Device, Eigen::SyclDevice>::value) {
      device_.synchronize();
    }

#endif
    StopBenchmarkTiming();
    SetBenchmarkFlopsProcessed(num_items);
  }

  TensorIndex m_;
  TensorIndex k_;
  TensorIndex n_;
  T* a_;
  T* b_;
  T* c_;
  Device device_;
};
#endif  // THIRD_PARTY_EIGEN3_TENSOR_BENCHMARKS_H_