TensorContraction.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H
12 
13 // IWYU pragma: private
14 #include "./InternalHeaderCheck.h"
15 
16 namespace Eigen {
17 
25 namespace internal {
26 
27 template <typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
28 struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>> {
29  // Type promotion to handle the case where the types of the lhs and the rhs are different.
 30  typedef typename gebp_traits<std::remove_const_t<typename LhsXprType::Scalar>,
 31                               std::remove_const_t<typename RhsXprType::Scalar>>::ResScalar Scalar;
 32 
 33  typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
 34                                        typename traits<RhsXprType>::StorageKind>::ret StorageKind;
 35  typedef
 36      typename promote_index_type<typename traits<LhsXprType>::Index, typename traits<RhsXprType>::Index>::type Index;
37  typedef typename LhsXprType::Nested LhsNested;
38  typedef typename RhsXprType::Nested RhsNested;
39  typedef std::remove_reference_t<LhsNested> LhsNested_;
40  typedef std::remove_reference_t<RhsNested> RhsNested_;
41 
42  // From NumDims below.
 43  static constexpr int NumDimensions =
 44      traits<LhsXprType>::NumDimensions + traits<RhsXprType>::NumDimensions - 2 * array_size<Dimensions>::value;
 45  static constexpr int Layout = traits<LhsXprType>::Layout;
 46  typedef std::conditional_t<Pointer_type_promotion<typename LhsXprType::Scalar, Scalar>::val,
 47                             typename traits<LhsXprType>::PointerType, typename traits<RhsXprType>::PointerType>
 48      PointerType;
 49 
50  enum { Flags = 0 };
51 };
52 
53 template <typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
54 struct eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, Eigen::Dense> {
 55  typedef const TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>& type;
 56 };
57 
58 template <typename Dimensions, typename LhsXprType, typename RhsXprType, typename OutputKernelType>
59 struct nested<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>, 1,
60  typename eval<TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType>>::type> {
 61  typedef TensorContractionOp<Dimensions, LhsXprType, RhsXprType, OutputKernelType> type;
 62 };
63 
64 template <typename Indices_, typename LeftArgType_, typename RightArgType_, typename OutputKernelType_,
65  typename Device_>
66 struct traits<
67  TensorEvaluator<const TensorContractionOp<Indices_, LeftArgType_, RightArgType_, OutputKernelType_>, Device_>> {
68  typedef Indices_ Indices;
69  typedef LeftArgType_ LeftArgType;
70  typedef RightArgType_ RightArgType;
71  typedef OutputKernelType_ OutputKernelType;
72  typedef Device_ Device;
73 
74  // From NumDims below.
 75  static constexpr int NumDimensions =
 76      traits<LeftArgType_>::NumDimensions + traits<RightArgType_>::NumDimensions - 2 * array_size<Indices_>::value;
 77 };
78 
79 // Helper class to allocate and deallocate temporary memory for packed buffers.
80 template <typename LhsScalar, typename RhsScalar>
 81 struct TensorContractionBlockMemAllocator {
 82  typedef void* BlockMemHandle;
83 
84  template <typename Device>
85  EIGEN_DEVICE_FUNC static BlockMemHandle allocate(Device& d, const Index bm, const Index bk, const Index bn,
86  LhsScalar** lhs_block, RhsScalar** rhs_block) {
87  eigen_assert(lhs_block);
88  eigen_assert(rhs_block);
89  BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
90  char* block_mem = static_cast<char*>(d.allocate(sz.lhs_size + sz.rhs_size));
91  *lhs_block = static_cast<LhsScalar*>(static_cast<void*>(block_mem));
92  *rhs_block = static_cast<RhsScalar*>(static_cast<void*>(block_mem + sz.lhs_size));
93  return block_mem;
94  }
95 
96  template <typename Device>
97  EIGEN_DEVICE_FUNC static BlockMemHandle allocateSlices(Device& d, const Index bm, const Index bk, const Index bn,
98  const Index num_lhs, const Index num_rhs,
99  const Index num_slices, std::vector<LhsScalar*>* lhs_blocks,
100  std::vector<RhsScalar*>* rhs_blocks) {
101  eigen_assert(num_slices > 0);
102  eigen_assert(num_lhs >= 0 && num_rhs >= 0);
103  eigen_assert(num_lhs == 0 || lhs_blocks);
104  eigen_assert(num_rhs == 0 || rhs_blocks);
105  BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn);
106  void* block_mem = d.allocate((num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices);
107  eigen_assert(block_mem);
108  char* mem = static_cast<char*>(block_mem);
109 
110  for (Index x = 0; x < num_slices; x++) {
111  if (num_lhs > 0) lhs_blocks[x].resize(num_lhs);
112  for (Index m = 0; m < num_lhs; m++) {
113  lhs_blocks[x][m] = static_cast<LhsScalar*>(static_cast<void*>(mem));
114  mem += sz.lhs_size;
115  }
116  if (num_rhs > 0) rhs_blocks[x].resize(num_rhs);
117  for (Index n = 0; n < num_rhs; n++) {
118  rhs_blocks[x][n] = static_cast<RhsScalar*>(static_cast<void*>(mem));
119  mem += sz.rhs_size;
120  }
121  }
122 
123  return block_mem;
124  }
125 
126  template <typename Device>
127  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
128  d.deallocate(handle);
129  }
130 
131  private:
132  struct BlockSizes {
 133  Index lhs_size;
 134  Index rhs_size;
 135  };
136  EIGEN_DEVICE_FUNC static BlockSizes ComputeLhsRhsBlockSizes(const Index bm, const Index bk, const Index bn) {
 137  Index align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1);
 138  BlockSizes sz;
139  sz.lhs_size = numext::div_ceil<Index>(bm * bk * sizeof(LhsScalar), align) * align;
140  sz.rhs_size = numext::div_ceil<Index>(bn * bk * sizeof(RhsScalar), align) * align;
141  return sz;
142  }
143 };
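// As a rough illustration of ComputeLhsRhsBlockSizes (numbers assumed here, not
// taken from the original source): with LhsScalar = float, bm = 64, bk = 32 and
// EIGEN_MAX_ALIGN_BYTES = 64,
//   lhs_size = div_ceil(64 * 32 * sizeof(float), 64) * 64 = 8192 bytes,
// i.e. the raw block size rounded up to the alignment boundary, so the lhs and
// rhs blocks carved out of the single allocation in allocate() both start on
// aligned addresses.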
144 
145 // WARNING: In this code we assume that Lhs and Rhs tensor expressions are in
146 // ColMajor storage order. This property is guaranteed by the
147 // TensorContractionOp evaluator. TensorContractionKernel specifies how we pack
148 // blocks of Lhs and Rhs tensor expressions, and how we invoke matrix
149 // multiplication for these blocks. Default tensor contraction uses
150 // gemm_pack_rhs, gemm_pack_lhs and gebp_kernel from Eigen Core (see
 151 // GeneralBlockPanelKernel.h for details).
152 //
153 // By specializing contraction kernels we can use other low level libraries to
154 // perform matrix multiplication, and still rely on Eigen contraction evaluator.
155 // This also includes full support in TensorContractionThreadPool, assuming that
 156 // the underlying gemm does not use its own threading.
157 //
158 // - ResScalar/LhsScalar/RhsScalar - scalar type for the result of
159 // multiplication, lhs tensor and rhs tensor respectively.
160 //
 161 // - StorageIndex - index type for the tensor expressions. In practice it is
 162 // almost always Eigen::Index.
163 //
164 // - OutputMapper provides access to the memory of the output matrix. In
165 // practice it's always column major blas_data_mapper (it must be of ResScalar
166 // type).
167 //
168 // - LhsMapper/RhsMapper similarly to blas_data_mapper provide a two dimensional
169 // view into the Lhs/Rhs tensor expressions. In practice it's
170 // TensorContractionInputMapper, or some specialization of it based on the
171 // type of tensor expression (e.g. TensorImagePatchOp has optimized input
172 // mapper).
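// As a rough sketch of how the evaluator drives this kernel (see
// TensorContractionEvaluatorBase::evalGemmPartial below for the real block
// loops and the partial-block handling):
//
//   TensorContractionKernel kernel(m, k, n, bm, bk, bn);
//   LhsBlock lhs_block;
//   RhsBlock rhs_block;
//   BlockMemHandle mem = kernel.allocate(device, &lhs_block, &rhs_block);
//   kernel.packLhs(&lhs_block, lhs.getSubMapper(i, k), bk, bm);
//   kernel.packRhs(&rhs_block, rhs.getSubMapper(k, j), bk, bn);
//   kernel.invoke(output.getSubMapper(i, j), lhs_block, rhs_block, bm, bk, bn,
//                 /*alpha=*/ResScalar(1), /*beta=*/ResScalar(1));
//   kernel.deallocate(device, mem);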
173 template <typename ResScalar, typename LhsScalar, typename RhsScalar, typename StorageIndex, typename OutputMapper,
174  typename LhsMapper, typename RhsMapper>
 175 struct TensorContractionKernel {
 176  // True if `invoke()` supports `beta` in `C <- alpha * A * B + beta * C`
 177  // (otherwise beta should always be equal to 1).
178  enum { HasBeta = false };
179 
180  EIGEN_DEVICE_FUNC TensorContractionKernel(StorageIndex m_, StorageIndex k_, StorageIndex n_, StorageIndex bm_,
181  StorageIndex bk_, StorageIndex bn_)
182  : m(m_), k(k_), n(n_), bm(bm_), bk(bk_), bn(bn_) {}
183 
184  // Pack blocks of Lhs and Rhs into contiguous blocks in memory.
185  typedef LhsScalar* LhsBlock;
186  typedef RhsScalar* RhsBlock;
187 
 188  // Packed Lhs/Rhs block memory allocator.
 189  typedef TensorContractionBlockMemAllocator<LhsScalar, RhsScalar> BlockMemAllocator;
 190  typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle;
 191 
 192  typedef internal::gebp_traits<LhsScalar, RhsScalar> Traits;
 193 
 194  typedef internal::gemm_pack_lhs<LhsScalar, StorageIndex, typename LhsMapper::SubMapper, Traits::mr,
 195                                  Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor>
 196      LhsPacker;
 197 
 198  typedef internal::gemm_pack_rhs<RhsScalar, StorageIndex, typename RhsMapper::SubMapper, Traits::nr, ColMajor>
 199      RhsPacker;
 200 
 201  typedef internal::gebp_kernel<LhsScalar, RhsScalar, StorageIndex, OutputMapper, Traits::mr, Traits::nr,
 202                                /*ConjugateLhs*/ false, /*ConjugateRhs*/ false>
 203      GebpKernel;
 204 
205  template <typename Device>
206  EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block, RhsBlock* rhs_block) {
207  return BlockMemAllocator::allocate(d, bm, bk, bn, lhs_block, rhs_block);
208  }
209 
210  template <typename Device>
211  EIGEN_DEVICE_FUNC BlockMemHandle allocateSlices(Device& d, const StorageIndex num_lhs, const StorageIndex num_rhs,
212  const StorageIndex num_slices, std::vector<LhsBlock>* lhs_blocks,
213  std::vector<RhsBlock>* rhs_blocks) {
214  return BlockMemAllocator::allocateSlices(d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_blocks, rhs_blocks);
215  }
216 
217  template <typename Device>
218  EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) {
 219  BlockMemAllocator::deallocate(d, handle);
 220  }
221 
222  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs(LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper,
223  const StorageIndex depth, const StorageIndex rows) {
224  LhsPacker()(*lhsBlock, data_mapper, depth, rows, /*stride*/ 0,
225  /*offset*/ 0);
226  }
227 
228  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs(RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper,
229  const StorageIndex depth, const StorageIndex cols) {
230  RhsPacker()(*rhsBlock, data_mapper, depth, cols);
231  }
232 
233  EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke(const OutputMapper& output_mapper, const LhsBlock& lhsBlock,
234  const RhsBlock& rhsBlock, const StorageIndex rows,
235  const StorageIndex depth, const StorageIndex cols,
236  const ResScalar alpha, const ResScalar beta) {
237  // Default GEBP kernel does not support beta.
238  eigen_assert(beta == ResScalar(1));
239  static const int kComputeStrideFromBlockDimensions = -1;
240  GebpKernel()(output_mapper, lhsBlock, rhsBlock, rows, depth, cols, alpha,
241  /*strideA*/ kComputeStrideFromBlockDimensions,
242  /*strideB*/ kComputeStrideFromBlockDimensions,
243  /*offsetA*/ 0, /*offsetB*/ 0);
244  }
245 
246  private:
247  // These are dimensions of the original Tensors, and selected block sizes. The
 248  // actual block sizes passed to all functions above might be smaller because of
249  // the partial blocks at the end.
250  const StorageIndex m;
251  const StorageIndex k;
252  const StorageIndex n;
253  const StorageIndex bm;
254  const StorageIndex bk;
255  const StorageIndex bn;
256 };
257 
258 } // end namespace internal
259 
 260 // Tensor contraction params that make it possible to map from the 2-dimensional
 261 // coordinates of the output matrix back to the output tensor dimensions.
 262 struct TensorContractionParams {
 263  // TensorContraction evaluator assumes that both tensors are in ColMajor
 264  // layout; if the tensors are in RowMajor layout, the evaluator swaps lhs with rhs.
 265  bool swapped_arguments;
 266 };
267 
 268 // An output kernel makes it possible to fuse operations into the tensor contraction.
269 //
270 // Examples:
271 // 1. Elementwise Relu transformation following Conv2D.
272 // 2. AddBias to the Conv2D output channels dimension.
273 //
274 // The NoOpOutputKernel implements an output kernel that does absolutely nothing.
 275 struct NoOpOutputKernel {
 291  template <typename Index, typename Scalar>
 292  EIGEN_ALWAYS_INLINE void operator()(const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
 293                                      const TensorContractionParams& params, Index i, Index j, Index num_rows,
 294                                      Index num_cols) const {
 295  EIGEN_UNUSED_VARIABLE(output_mapper);
 296  EIGEN_UNUSED_VARIABLE(params);
 297  EIGEN_UNUSED_VARIABLE(i);
 298  EIGEN_UNUSED_VARIABLE(j);
299  EIGEN_UNUSED_VARIABLE(num_rows);
300  EIGEN_UNUSED_VARIABLE(num_cols);
301  }
302 };
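// A hypothetical custom kernel, sketched here for illustration only (not part of
// Eigen), would follow the same operator() signature to fuse e.g. a ReLU into the
// contraction:
//
//   struct ReluOutputKernel {
//     template <typename Index, typename Scalar>
//     EIGEN_ALWAYS_INLINE void operator()(
//         const internal::blas_data_mapper<Scalar, Index, ColMajor>& output_mapper,
//         const TensorContractionParams& params, Index i, Index j,
//         Index num_rows, Index num_cols) const {
//       EIGEN_UNUSED_VARIABLE(params);
//       EIGEN_UNUSED_VARIABLE(i);
//       EIGEN_UNUSED_VARIABLE(j);
//       for (Index c = 0; c < num_cols; ++c)
//         for (Index r = 0; r < num_rows; ++r)
//           output_mapper(r, c) = numext::maxi(output_mapper(r, c), Scalar(0));
//     }
//   };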
303 
304 template <typename Indices, typename LhsXprType, typename RhsXprType,
305  typename OutputKernelType = const NoOpOutputKernel>
 306 class TensorContractionOp
 307     : public TensorBase<TensorContractionOp<Indices, LhsXprType, RhsXprType, OutputKernelType>, ReadOnlyAccessors> {
 308  public:
 309  typedef typename Eigen::internal::traits<TensorContractionOp>::Scalar Scalar;
310  typedef typename internal::gebp_traits<typename LhsXprType::CoeffReturnType,
311  typename RhsXprType::CoeffReturnType>::ResScalar CoeffReturnType;
 312  typedef typename Eigen::internal::nested<TensorContractionOp>::type Nested;
 313  typedef typename Eigen::internal::traits<TensorContractionOp>::StorageKind StorageKind;
 314  typedef typename Eigen::internal::traits<TensorContractionOp>::Index Index;
 315 
316  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp(const LhsXprType& lhs, const RhsXprType& rhs,
317  const Indices& dims,
318  const OutputKernelType& output_kernel = OutputKernelType())
319  : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims), m_output_kernel(output_kernel) {}
320 
321  EIGEN_DEVICE_FUNC const Indices& indices() const { return m_indices; }
322 
 324  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename LhsXprType::Nested>& lhsExpression() const {
 325  return m_lhs_xpr;
326  }
327 
 328  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename RhsXprType::Nested>& rhsExpression() const {
 329  return m_rhs_xpr;
330  }
331 
332  EIGEN_DEVICE_FUNC const OutputKernelType& outputKernel() const { return m_output_kernel; }
333 
334  protected:
335  typename LhsXprType::Nested m_lhs_xpr;
336  typename RhsXprType::Nested m_rhs_xpr;
337  const Indices m_indices;
338  const OutputKernelType m_output_kernel;
339 };
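// In user code this expression is normally created through TensorBase::contract,
// e.g. (sketch):
//
//   Eigen::Tensor<float, 3> A(2, 3, 4);
//   Eigen::Tensor<float, 2> B(4, 5);
//   A.setRandom();
//   B.setRandom();
//   Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(2, 0)};
//   Eigen::Tensor<float, 3> C = A.contract(B, dims);  // C has dimensions 2 x 3 x 5
//
// An additional output kernel argument can be passed to contract() to fuse an
// elementwise operation into the evaluation.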
340 
341 template <typename Derived>
 342 struct TensorContractionEvaluatorBase : internal::no_assignment_operator {
 343  typedef typename internal::traits<Derived>::Indices Indices;
 344  typedef typename internal::traits<Derived>::LeftArgType LeftArgType;
 345  typedef typename internal::traits<Derived>::RightArgType RightArgType;
 346  typedef typename internal::traits<Derived>::OutputKernelType OutputKernelType;
 347  typedef typename internal::traits<Derived>::Device Device;
 348 
 349  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
 350  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
 351  typedef typename XprType::Index Index;
 352  typedef typename XprType::CoeffReturnType CoeffReturnType;
 353  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
 354  typedef StorageMemory<Scalar, Device> Storage;
 355  typedef typename Storage::Type EvaluatorPointerType;
 356 
 357  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
 358  enum {
 359  IsAligned = true,
 360  PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
 361  BlockAccess = false,
 362  PreferBlockAccess = false,
 363  CoordAccess = false, // to be implemented
 364  RawAccess = true
 365  };
366 
367  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
 368  typedef internal::TensorBlockNotImplemented TensorBlock;
 369  //===--------------------------------------------------------------------===//
370 
371  // Most of the code is assuming that both input tensors are ColMajor. If the
372  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
373  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
374  // will pretend B is LHS and A is RHS.
 375  typedef std::conditional_t<static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>
 376      EvalLeftArgType;
 377  typedef std::conditional_t<static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>
 378      EvalRightArgType;
 379 
 380  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluatorType;
 381  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluatorType;
 382 
 383  static constexpr int LDims =
 384      internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
 385  static constexpr int RDims =
 386      internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
 387  static constexpr int ContractDims = internal::array_size<Indices>::value;
 388  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;
 389 
 390  typedef array<Index, ContractDims> contract_t;
 391  typedef array<Index, LDims - ContractDims> left_nocontract_t;
 392  typedef array<Index, RDims - ContractDims> right_nocontract_t;
 393 
 394  typedef DSizes<Index, NumDims> Dimensions;
 395 
 396  EIGEN_STRONG_INLINE TensorContractionEvaluatorBase(const XprType& op, const Device& device)
397  : m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(), op.lhsExpression(),
398  op.rhsExpression()),
399  device),
400  m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(), op.rhsExpression(),
401  op.lhsExpression()),
402  device),
403  m_device(device),
404  m_output_kernel(op.outputKernel()),
405  m_result(NULL) {
 406  EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<EvalLeftArgType, Device>::Layout) ==
 407                       static_cast<int>(TensorEvaluator<EvalRightArgType, Device>::Layout)),
 408                      YOU_MADE_A_PROGRAMMING_MISTAKE);
409 
410  DSizes<Index, LDims> eval_left_dims;
411  DSizes<Index, RDims> eval_right_dims;
412  array<IndexPair<Index>, ContractDims> eval_op_indices;
413  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
414  // For ColMajor, we keep using the existing dimensions
415  for (int i = 0; i < LDims; i++) {
416  eval_left_dims[i] = m_leftImpl.dimensions()[i];
417  }
418  for (int i = 0; i < RDims; i++) {
419  eval_right_dims[i] = m_rightImpl.dimensions()[i];
420  }
421  // We keep the pairs of contracting indices.
422  for (int i = 0; i < ContractDims; i++) {
423  eval_op_indices[i].first = op.indices()[i].first;
424  eval_op_indices[i].second = op.indices()[i].second;
425  }
426  } else {
427  // For RowMajor, we need to reverse the existing dimensions
428  for (int i = 0; i < LDims; i++) {
429  eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1];
430  }
431  for (int i = 0; i < RDims; i++) {
432  eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1];
433  }
434  // We need to flip all the pairs of contracting indices as well as
435  // reversing the dimensions.
436  for (int i = 0; i < ContractDims; i++) {
437  eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second;
438  eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first;
439  }
440  }
441 
442  // Check for duplicate axes and make sure the first index in eval_op_indices
443  // is increasing. Using O(n^2) sorting is OK since ContractDims is small
444  for (int i = 0; i < ContractDims; i++) {
445  for (int j = i + 1; j < ContractDims; j++) {
446  eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first &&
447  eval_op_indices[j].second != eval_op_indices[i].second && "contraction axes should be unique");
448  if (eval_op_indices[j].first < eval_op_indices[i].first) {
449  numext::swap(eval_op_indices[j], eval_op_indices[i]);
450  }
451  }
452  }
453 
454  array<Index, LDims> lhs_strides;
455  lhs_strides[0] = 1;
456  for (int i = 0; i < LDims - 1; ++i) {
457  lhs_strides[i + 1] = lhs_strides[i] * eval_left_dims[i];
458  }
459 
460  array<Index, RDims> rhs_strides;
461  rhs_strides[0] = 1;
462  for (int i = 0; i < RDims - 1; ++i) {
463  rhs_strides[i + 1] = rhs_strides[i] * eval_right_dims[i];
464  }
465 
466  if (m_i_strides.size() > 0) m_i_strides[0] = 1;
467  if (m_j_strides.size() > 0) m_j_strides[0] = 1;
468  if (m_k_strides.size() > 0) m_k_strides[0] = 1;
469 
470  m_i_size = 1;
471  m_j_size = 1;
472  m_k_size = 1;
473 
474  // To compute the dimension, we simply concatenate the non-contracting
475  // dimensions of the left and then the right tensor. Additionally, we also
476  // compute the strides corresponding to the left non-contracting
477  // dimensions and right non-contracting dimensions.
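 // For example (illustration only): contracting a ColMajor lhs of dimensions
 // [2, 3, 4] with a rhs of dimensions [4, 5] along the index pair (2, 0) yields
 // output dimensions [2, 3, 5], with m_i_size = 2 * 3 = 6, m_j_size = 5 and
 // m_k_size = 4.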
 478  m_lhs_inner_dim_contiguous = true;
 479  int dim_idx = 0;
480  Index nocontract_idx = 0;
481 
482  for (int i = 0; i < LDims; i++) {
483  // find if we are contracting on index i of left tensor
484  bool contracting = false;
485  for (int j = 0; j < ContractDims; j++) {
486  if (eval_op_indices[j].first == i) {
487  contracting = true;
488  break;
489  }
490  }
491  if (!contracting) {
492  // add dimension size to output dimensions
493  m_dimensions[dim_idx] = eval_left_dims[i];
494  m_left_nocontract_strides[nocontract_idx] = lhs_strides[i];
495  if (dim_idx != i) {
 496  m_lhs_inner_dim_contiguous = false;
 497  }
498  if (nocontract_idx + 1 < internal::array_size<left_nocontract_t>::value) {
499  m_i_strides[nocontract_idx + 1] = m_i_strides[nocontract_idx] * eval_left_dims[i];
500  } else {
501  m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i];
502  }
503  dim_idx++;
504  nocontract_idx++;
505  }
506  }
507 
508  nocontract_idx = 0;
509  for (int i = 0; i < RDims; i++) {
510  bool contracting = false;
511  // find if we are contracting on index i of right tensor
512  for (int j = 0; j < ContractDims; j++) {
513  if (eval_op_indices[j].second == i) {
514  contracting = true;
515  break;
516  }
517  }
518  if (!contracting) {
519  m_dimensions[dim_idx] = eval_right_dims[i];
520  if (nocontract_idx + 1 < internal::array_size<right_nocontract_t>::value) {
521  m_j_strides[nocontract_idx + 1] = m_j_strides[nocontract_idx] * eval_right_dims[i];
522  } else {
523  m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i];
524  }
525  m_right_nocontract_strides[nocontract_idx] = rhs_strides[i];
526  dim_idx++;
527  nocontract_idx++;
528  }
529  }
530 
531  // Now compute the strides corresponding to the contracting dimensions. We
532  // assumed above that non-contracting axes are represented in the same order
533  // in the matrix as they are in the tensor. This is not the case for
534  // contracting axes. As the contracting axes must be of the same size in
535  // each tensor, we'll only look at the first tensor here.
 536  m_rhs_inner_dim_contiguous = true;
 537  m_rhs_inner_dim_reordered = false;
 538  for (int i = 0; i < ContractDims; i++) {
539  Index left = eval_op_indices[i].first;
540  Index right = eval_op_indices[i].second;
541 
542  Index size = eval_left_dims[left];
543  eigen_assert(size == eval_right_dims[right] && "Contraction axes must be same size");
544 
545  if (i + 1 < static_cast<int>(internal::array_size<contract_t>::value)) {
546  m_k_strides[i + 1] = m_k_strides[i] * size;
547  } else {
548  m_k_size = m_k_strides[i] * size;
549  }
550  m_left_contracting_strides[i] = lhs_strides[left];
551  m_right_contracting_strides[i] = rhs_strides[right];
552 
553  if (i > 0 && right < eval_op_indices[i - 1].second) {
 554  m_rhs_inner_dim_reordered = true;
 555  }
556  if (right != i) {
 557  m_rhs_inner_dim_contiguous = false;
 558  }
559  }
560 
561  // If the layout is RowMajor, we need to reverse the m_dimensions
562  if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
563  for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
 564  numext::swap(m_dimensions[i], m_dimensions[j]);
 565  }
566  }
567 
568  // A set of parameters that will allow output kernel to get from output
569  // tensor dimensions (i, j) into the original tensor dimensions.
570  // TODO(ezhulenev): Add parameters required to infer output tensor index for
571  // more complex contractions than 2x2 on internal dimension.
 572  m_tensor_contraction_params = {/*swapped_arguments=*/static_cast<int>(Layout) == RowMajor};
 573  }
574 
 575  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
 576 
 577  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
 578  m_leftImpl.evalSubExprsIfNeeded(NULL);
 579  m_rightImpl.evalSubExprsIfNeeded(NULL);
 580  if (data) {
581  evalTo(data);
582  return false;
583  } else {
584  m_result = static_cast<EvaluatorPointerType>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
585  evalTo(m_result);
586  return true;
587  }
588  }
589 
590 #ifdef EIGEN_USE_THREADS
591  template <typename EvalSubExprsCallback>
592  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType dest, EvalSubExprsCallback done) {
593  m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
594  m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) {
595  if (dest) {
596  evalToAsync(dest, [done]() { done(false); });
597  } else {
598  m_result = static_cast<EvaluatorPointerType>(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)));
599  evalToAsync(m_result, [done]() { done(true); });
600  }
601  });
602  });
603  }
604 #endif // EIGEN_USE_THREADS
605 
606 #ifndef TENSOR_CONTRACTION_DISPATCH
607 #define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
608  if (this->m_lhs_inner_dim_contiguous) { \
609  if (this->m_rhs_inner_dim_contiguous) { \
610  if (this->m_rhs_inner_dim_reordered) { \
611  METHOD<true, true, true, ALIGNMENT> ARGS; \
612  } else { \
613  METHOD<true, true, false, ALIGNMENT> ARGS; \
614  } \
615  } else { \
616  if (this->m_rhs_inner_dim_reordered) { \
617  METHOD<true, false, true, ALIGNMENT> ARGS; \
618  } else { \
619  METHOD<true, false, false, ALIGNMENT> ARGS; \
620  } \
621  } \
622  } else { \
623  if (this->m_rhs_inner_dim_contiguous) { \
624  if (this->m_rhs_inner_dim_reordered) { \
625  METHOD<false, true, true, ALIGNMENT> ARGS; \
626  } else { \
627  METHOD<false, true, false, ALIGNMENT> ARGS; \
628  } \
629  } else { \
630  if (this->m_rhs_inner_dim_reordered) { \
631  METHOD<false, false, true, ALIGNMENT> ARGS; \
632  } else { \
633  METHOD<false, false, false, ALIGNMENT> ARGS; \
634  } \
635  } \
636  }
637 #endif
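// For example, the default-device evaluator below invokes
//
//   TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
//
// which expands to the nested if/else above and ends up calling
// evalProductSequential<lhs_contiguous, rhs_contiguous, rhs_reordered, Alignment>(buffer),
// with the three booleans taken from the m_lhs_inner_dim_contiguous,
// m_rhs_inner_dim_contiguous and m_rhs_inner_dim_reordered flags computed in the
// evaluator constructor.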
638 
639 #ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH
640 #define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \
641  if (this->m_lhs_inner_dim_contiguous) { \
642  if (this->m_rhs_inner_dim_contiguous) { \
643  if (this->m_rhs_inner_dim_reordered) { \
644  (new METHOD<DONE, true, true, true, ALIGNMENT> ARGS)->FN; \
645  } else { \
646  (new METHOD<DONE, true, true, false, ALIGNMENT> ARGS)->FN; \
647  } \
648  } else { \
649  if (this->m_rhs_inner_dim_reordered) { \
650  (new METHOD<DONE, true, false, true, ALIGNMENT> ARGS)->FN; \
651  } else { \
652  (new METHOD<DONE, true, false, false, ALIGNMENT> ARGS)->FN; \
653  } \
654  } \
655  } else { \
656  if (this->m_rhs_inner_dim_contiguous) { \
657  if (this->m_rhs_inner_dim_reordered) { \
658  (new METHOD<DONE, false, true, true, ALIGNMENT> ARGS)->FN; \
659  } else { \
660  (new METHOD<DONE, false, true, false, ALIGNMENT> ARGS)->FN; \
661  } \
662  } else { \
663  if (this->m_rhs_inner_dim_reordered) { \
664  (new METHOD<DONE, false, false, true, ALIGNMENT> ARGS)->FN; \
665  } else { \
666  (new METHOD<DONE, false, false, false, ALIGNMENT> ARGS)->FN; \
667  } \
668  } \
669  }
670 #endif
671 
672  EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const {
673  static_cast<const Derived*>(this)->template evalProduct<Unaligned>(buffer);
674  }
675 
676 #ifdef EIGEN_USE_THREADS
677  template <typename EvalToCallback>
678  void evalToAsync(Scalar* buffer, EvalToCallback done) const {
679  static_cast<const Derived*>(this)->template evalProductAsync<EvalToCallback, Unaligned>(buffer, std::move(done));
680  }
681 #endif // EIGEN_USE_THREADS
682 
683  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
684  void evalProductSequential(Scalar* buffer) const {
685  if (this->m_j_size == 1) {
686  this->template evalGemv<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
687  buffer);
688  } else {
689  this->template evalGemm<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment>(
690  buffer);
691  }
692  }
693 
694  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
695 #if !defined(EIGEN_HIPCC)
 696  EIGEN_DEVICE_FUNC
 697 #endif
698  void
699  evalGemv(Scalar* buffer) const {
700  const Index rows = m_i_size;
701  const Index cols = m_k_size;
702 
703  typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
704  typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
705  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
706  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
 707  const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
 708  const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
 709  const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned;
710  const int rhs_alignment = RightEvaluator::IsAligned ? Aligned : Unaligned;
 711  typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
 712                                                 contract_t, lhs_packet_size, lhs_inner_dim_contiguous, false,
713  lhs_alignment>
714  LhsMapper;
715 
 716  typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
 717                                                 contract_t, rhs_packet_size, rhs_inner_dim_contiguous,
718  rhs_inner_dim_reordered, rhs_alignment>
719  RhsMapper;
720 
 721  LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides, m_left_contracting_strides, m_k_strides);
 722  RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides, m_right_contracting_strides, m_k_strides);
 723 
724  const Scalar alpha(1);
725  const Index resIncr(1);
726 
 727  // zero out the result buffer (which must be of size at least rows * sizeof(Scalar))
728  m_device.fill(buffer, buffer + rows, Scalar(0));
729 
730  internal::general_matrix_vector_product<Index, LhsScalar, LhsMapper, ColMajor, false, RhsScalar, RhsMapper,
731  false>::run(rows, cols, lhs, rhs, buffer, resIncr, alpha);
 732 
 733  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
734  m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, static_cast<Index>(0),
735  static_cast<Index>(0), rows, static_cast<Index>(1));
736  }
737 
738  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
739 #if !defined(EIGEN_HIPCC)
 740  EIGEN_DEVICE_FUNC
 741 #endif
742  void
743  evalGemm(Scalar* buffer) const {
744  // columns in left side, rows in right side
745  const Index k = this->m_k_size;
746  this->template evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered,
747  Alignment, true>(buffer, 0, k, 1);
748  }
749 
750  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
 751  EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel(Scalar* buffer, Index k_start, Index k_end,
 752                                                            int num_threads) const {
753  evalGemmPartial<lhs_inner_dim_contiguous, rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Alignment,
754  /*use_output_kernel*/ false>(buffer, k_start, k_end, num_threads);
755  }
756 
757  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment,
758  bool use_output_kernel>
759  EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const {
760  eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size);
761  // columns in slice on left side, rows on right side
762  const Index k_slice = k_end - k_start;
763 
764  // rows in left side
765  const Index m = this->m_i_size;
766 
767  // columns in right side
768  const Index n = this->m_j_size;
769 
770  // define data mappers for Lhs and Rhs
771  typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
772  typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
773 
774  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
775  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
776 
 777  const Index lhs_packet_size = internal::unpacket_traits<typename LeftEvaluator::PacketReturnType>::size;
 778  const Index rhs_packet_size = internal::unpacket_traits<typename RightEvaluator::PacketReturnType>::size;
 779 
 780  typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
 781                                                 contract_t, lhs_packet_size, lhs_inner_dim_contiguous, false,
782  Unaligned>
783  LhsMapper;
784 
 785  typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
 786                                                 contract_t, rhs_packet_size, rhs_inner_dim_contiguous,
787  rhs_inner_dim_reordered, Unaligned>
788  RhsMapper;
789 
 790  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
 791 
 792  typedef internal::TensorContractionKernel<Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper>
 793      TensorContractionKernel;
794 
795  // initialize data mappers
796  LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides,
 797                this->m_left_contracting_strides, this->m_k_strides);
 798 
799  RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides,
 800                this->m_right_contracting_strides, this->m_k_strides);
 801 
802  OutputMapper output(buffer, m);
803 
804  // Sizes of the blocks to load in cache. See the Goto paper for details.
 805  internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar, Index, internal::ShardByCol> blocking(
 806      k_slice, m, n, num_threads);
807  const Index kc = blocking.kc();
808  const Index mc = numext::mini(m, blocking.mc());
809  const Index nc = numext::mini(n, blocking.nc());
810 
811  typedef typename TensorContractionKernel::LhsBlock LhsBlock;
812  typedef typename TensorContractionKernel::RhsBlock RhsBlock;
813 
814  LhsBlock blockA;
815  RhsBlock blockB;
816 
817  TensorContractionKernel kernel(m, k_slice, n, mc, kc, nc);
818 
819  typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle;
820  const BlockMemHandle packed_mem = kernel.allocate(this->m_device, &blockA, &blockB);
821 
822  // If a contraction kernel does not support beta, explicitly initialize
823  // output buffer with zeroes.
824  if (!TensorContractionKernel::HasBeta) {
825  this->m_device.fill(buffer, buffer + m * n, Scalar(0));
826  }
827 
828  for (Index i2 = 0; i2 < m; i2 += mc) {
829  const Index actual_mc = numext::mini(i2 + mc, m) - i2;
830  for (Index k2 = k_start; k2 < k_end; k2 += kc) {
831  // make sure we don't overshoot right edge of left matrix, then pack vertical panel
832  const Index actual_kc = numext::mini(k2 + kc, k_end) - k2;
833  kernel.packLhs(&blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);
834 
835  // If kernel supports beta, there is no need to initialize output
836  // buffer with zeroes.
837  const Scalar alpha = Scalar(1);
838  const Scalar beta = (TensorContractionKernel::HasBeta && k2 == k_start) ? Scalar(0) : Scalar(1);
839 
840  // series of horizontal blocks
841  for (Index j2 = 0; j2 < n; j2 += nc) {
842  // make sure we don't overshoot right edge of right matrix, then pack block
843  const Index actual_nc = numext::mini(j2 + nc, n) - j2;
844  kernel.packRhs(&blockB, rhs.getSubMapper(k2, j2), actual_kc, actual_nc);
845 
846  // call gebp (matrix kernel)
847  // The parameters here are copied from Eigen's GEMM implementation
848  const OutputMapper output_mapper = output.getSubMapper(i2, j2);
849  kernel.invoke(output_mapper, blockA, blockB, actual_mc, actual_kc, actual_nc, alpha, beta);
850 
851  // We are done with this [i2, j2] output block.
852  if (use_output_kernel && k2 + kc >= k_end) {
853  m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2, actual_mc, actual_nc);
854  }
855  }
856  }
857  }
858 
859  kernel.deallocate(this->m_device, packed_mem);
860  }
861 
 862  EIGEN_STRONG_INLINE void cleanup() {
 863  m_leftImpl.cleanup();
 864  m_rightImpl.cleanup();
 865 
866  if (m_result != NULL) {
867  m_device.deallocate(m_result);
868  m_result = NULL;
869  }
870  }
871 
 872  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_result[index]; }
 873 
 874  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const {
 875  return TensorOpCost(sizeof(CoeffReturnType), 0, 0);
876  }
877 
878  template <int LoadMode>
 879  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
 880  return internal::ploadt<PacketReturnType, LoadMode>(m_result + index);
881  }
882 
 883  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; }
 884 
885  protected:
 886  Dimensions m_dimensions;
 887 
 888  contract_t m_k_strides;
 889  contract_t m_left_contracting_strides;
 890  contract_t m_right_contracting_strides;
 891 
 892  bool m_lhs_inner_dim_contiguous;
 893  bool m_rhs_inner_dim_contiguous;
 894  bool m_rhs_inner_dim_reordered;
 895 
 896  left_nocontract_t m_i_strides;
 897  right_nocontract_t m_j_strides;
 898  left_nocontract_t m_left_nocontract_strides;
 899  right_nocontract_t m_right_nocontract_strides;
 900 
 901  Index m_i_size;
 902  Index m_j_size;
 903  Index m_k_size;
 904 
 905  TensorContractionParams m_tensor_contraction_params;
 906 
 907  TensorEvaluator<EvalLeftArgType, Device> m_leftImpl;
 908  TensorEvaluator<EvalRightArgType, Device> m_rightImpl;
 909  const Device EIGEN_DEVICE_REF m_device;
 910  OutputKernelType m_output_kernel;
 911  EvaluatorPointerType m_result;
 912 };
913 
914 // evaluator for default device
915 template <typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType, typename Device>
 916 struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device>
 917     : public TensorContractionEvaluatorBase<
 918           TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device>> {
 919  typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
 920  typedef TensorContractionEvaluatorBase<Self> Base;
 921 
 922  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
923  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
924  typedef typename XprType::Index Index;
 925  typedef typename XprType::CoeffReturnType CoeffReturnType;
 926  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
 927 
 928  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
 929 
930  // Most of the code is assuming that both input tensors are ColMajor. If the
931  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
932  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
933  // will pretend B is LHS and A is RHS.
934  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), LeftArgType, RightArgType> EvalLeftArgType;
935  typedef std::conditional_t<Layout == static_cast<int>(ColMajor), RightArgType, LeftArgType> EvalRightArgType;
936 
 937  static constexpr int LDims =
 938      internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
 939  static constexpr int RDims =
 940      internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
 941  static constexpr int ContractDims = internal::array_size<Indices>::value;
942 
 943  typedef array<Index, ContractDims> contract_t;
 944  typedef array<Index, LDims - ContractDims> left_nocontract_t;
945  typedef array<Index, RDims - ContractDims> right_nocontract_t;
946 
947  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;
948 
949  // Could we use NumDimensions here?
 950  typedef DSizes<Index, NumDims> Dimensions;
 951 
952  TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) {}
953 
954  template <int Alignment>
955  void evalProduct(Scalar* buffer) const {
956  TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer));
957  }
958 };
959 
960 } // end namespace Eigen
961 
962 #endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H