TensorContractionThreadPool.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H
12 
13 // evaluator for thread pool device
14 #ifdef EIGEN_USE_THREADS
15 
16 // IWYU pragma: private
17 #include "./InternalHeaderCheck.h"
18 
19 namespace Eigen {
20 
21 template <typename Indices, typename LeftArgType, typename RightArgType, typename OutputKernelType>
22 struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>,
23  ThreadPoolDevice>
24  : public TensorContractionEvaluatorBase<TensorEvaluator<
25  const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, ThreadPoolDevice>> {
26  typedef ThreadPoolDevice Device;
27 
28  typedef TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType>, Device> Self;
29  typedef TensorContractionEvaluatorBase<Self> Base;
30 
31  typedef TensorContractionOp<Indices, LeftArgType, RightArgType, OutputKernelType> XprType;
32  typedef std::remove_const_t<typename XprType::Scalar> Scalar;
33  typedef typename XprType::Index Index;
34  typedef typename XprType::CoeffReturnType CoeffReturnType;
35  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
36 
37  static constexpr int Layout = TensorEvaluator<LeftArgType, Device>::Layout;
38 
39  // Most of the code assumes that both input tensors are ColMajor. If the
40  // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS:
41  // If we want to compute A * B = C, where A is LHS and B is RHS, the code
42  // will pretend B is LHS and A is RHS.
43  typedef std::conditional_t<static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>
44  EvalLeftArgType;
45  typedef std::conditional_t<static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>
46  EvalRightArgType;
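  // A brief illustration of the swap identity (added note, not part of the
  // original source): a RowMajor matrix reinterpreted as ColMajor is its
  // transpose, so for RowMajor inputs computing C = A * B is equivalent to
  // computing the ColMajor product C^T = B^T * A^T. Swapping LHS and RHS (and
  // the corresponding dimensions) therefore lets the ColMajor code path below
  // produce the correct RowMajor result without any extra transposition.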
47 
48  static constexpr int LDims =
49  internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
50  static constexpr int RDims =
51  internal::array_size<typename TensorEvaluator<EvalRightArgType, Device>::Dimensions>::value;
52  static constexpr int ContractDims = internal::array_size<Indices>::value;
53 
54  typedef array<Index, LDims> left_dim_mapper_t;
55  typedef array<Index, RDims> right_dim_mapper_t;
56 
57  typedef array<Index, ContractDims> contract_t;
58  typedef array<Index, LDims - ContractDims> left_nocontract_t;
59  typedef array<Index, RDims - ContractDims> right_nocontract_t;
60 
61  static constexpr int NumDims = LDims + RDims - 2 * ContractDims;
62 
63  typedef DSizes<Index, NumDims> Dimensions;
64 
65  // typedefs needed in evalTo
66  typedef std::remove_const_t<typename EvalLeftArgType::Scalar> LhsScalar;
67  typedef std::remove_const_t<typename EvalRightArgType::Scalar> RhsScalar;
68  typedef typename internal::gebp_traits<LhsScalar, RhsScalar> Traits;
69 
70  typedef TensorEvaluator<EvalLeftArgType, Device> LeftEvaluator;
71  typedef TensorEvaluator<EvalRightArgType, Device> RightEvaluator;
72 
73  TensorEvaluator(const XprType& op, const Device& device) : Base(op, device) {}
74 
75  template <int Alignment>
76  void evalProduct(Scalar* buffer) const {
77  evalProductImpl<NoCallback, Alignment>(buffer, NoCallback());
78  }
79 
80  template <typename EvalToCallback, int Alignment>
81  void evalProductAsync(Scalar* buffer, EvalToCallback done) const {
82  evalProductImpl<EvalToCallback, Alignment>(buffer, std::move(done));
83  }
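  // Usage sketch (added note, illustrative only; names outside this file are
  // assumptions about the public Tensor API): this evaluator runs when a
  // contraction expression is assigned on a ThreadPoolDevice, e.g.
  //
  //   Eigen::ThreadPool pool(4);
  //   Eigen::ThreadPoolDevice device(&pool, /*num_cores=*/4);
  //   Eigen::Tensor<float, 2> a(64, 32), b(32, 48), c(64, 48);
  //   a.setRandom();
  //   b.setRandom();
  //   Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(1, 0)};
  //   c.device(device) = a.contract(b, dims);        // sync path: evalProduct
  //   // c.device(device, std::move(done)) = ...;    // async path: evalProductAsync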
84 
85  template <typename DoneCallback, int Alignment>
86  void evalProductImpl(Scalar* buffer, DoneCallback done) const {
87  // This function computes a lot of heuristics in multiple steps, and it
88  // also has multiple exit points. To keep it sane, readable and all in one
89  // place, sync/async execution decision is made at runtime at the very end.
90  //
91  // (1) In sync mode we allocate Context on the stack, submit computations
92  // to the device thread pool, and block on a barrier until it is
93  // completed.
94  //
95  // (2) In async mode we allocate Context on the heap, and after all tasks
96  // are finished, we call the provided done callback, and delete the
97  // context from the heap.
98  //
99  // (*) EvalParallelContext & EvalShardedByInnerDimContext own all the state
100  // and temporary buffers required for executing the tensor contraction.
101  // They are responsible for cleaning it all up after the contraction is done.
102  static const bool IsEvalInSyncMode = std::is_same<DoneCallback, NoCallback>::value;
103 
104  const Index m = this->m_i_size;
105  const Index n = this->m_j_size;
106  const Index k = this->m_k_size;
107  if (m == 0 || n == 0 || k == 0) return;
108 
109  // Compute a set of algorithm parameters:
110  // - kernel block sizes (bm, bn, bk)
111  // - task grain sizes (number of kernels executed per task: gm, gn)
112  // - number of threads
113  // - sharding by row/column
114  // - parallel packing or first lhs then rhs
115  // and some derived parameters:
116  // - number of tasks (nm, nn, nk)
117  // - number of kernels (nm0, nn0)
118  // Unfortunately, all these parameters are tightly interdependent.
119  // So in some cases we first compute approximate values, then compute other
120  // values based on these approximations and then refine the approximations.
121 
122  // There are lots of heuristics here. There is some reasoning behind them,
123  // but ultimately they are just tuned on contraction benchmarks for
124  // different input configurations, thread counts and instruction sets.
125  // So feel free to question any of them.
126 
127  // Compute whether we want to shard by row or by column.
128  // This is a first approximation, it will be refined later. Since we don't
129  // know the number of threads yet we use 2, because what we are most
130  // interested in at this point is whether it makes sense to use
131  // parallelization at all or not.
132  bool shard_by_col = shardByCol(m, n, 2);
133 
134  // First approximation of kernel blocking sizes.
135  // Again, we don't know the number of threads yet, so we use 2.
136  Index bm, bn, bk;
137  if (shard_by_col) {
138  internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar, Index, internal::ShardByCol> blocking(k, m, n,
139  2);
140  bm = blocking.mc();
141  bn = blocking.nc();
142  bk = blocking.kc();
143  } else {
144  internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar, Index, internal::ShardByRow> blocking(k, m, n,
145  2);
146  bm = blocking.mc();
147  bn = blocking.nc();
148  bk = blocking.kc();
149  }
150 
151  // Compute optimal number of threads.
152  // Note: we use bk instead of k here because we are interested in the amount
153  // of _parallelizable_ computation, and computation is not parallelizable
154  // across the k dimension.
155  const TensorOpCost cost = contractionCost(m, n, bm, bn, bk, shard_by_col, false);
156  int num_threads =
157  TensorCostModel<ThreadPoolDevice>::numThreads(static_cast<double>(n) * m, cost, this->m_device.numThreads());
158  int num_threads_by_k = numThreadsInnerDim(m, n, k);
159  if (shardByInnerDim(m, n, k, num_threads, num_threads_by_k)) {
160  // We are in the scenario where it is more effective to shard by the
161  // inner dimension.
162  if (IsEvalInSyncMode) {
163  EvalShardedByInnerDimContext<DoneCallback> ctx(this, num_threads_by_k, buffer, m, n, k, std::move(done));
164  ctx.template run<Alignment>();
165  } else {
166  auto* ctx =
167  new EvalShardedByInnerDimContext<DoneCallback>(this, num_threads_by_k, buffer, m, n, k, std::move(done));
168  ctx->template runAsync<Alignment>();
169  }
170 
171  return;
172  }
173 
174  // TODO(dvyukov): this is a stop-gap to prevent regressions while the cost
175  // model is not tuned. Remove this when the cost model is tuned.
176  if (n == 1) num_threads = 1;
177 
178  if (num_threads == 1) {
179  TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Unaligned, (buffer));
180  if (!IsEvalInSyncMode) done();
181  return;
182  }
183 
184  // Now that we know number of threads, recalculate sharding and blocking.
185  shard_by_col = shardByCol(m, n, num_threads);
186  if (shard_by_col) {
187  internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar, Index, internal::ShardByCol> blocking(
188  k, m, n, num_threads);
189  bm = blocking.mc();
190  bn = blocking.nc();
191  bk = blocking.kc();
192  } else {
193  internal::TensorContractionBlocking<Scalar, LhsScalar, RhsScalar, Index, internal::ShardByRow> blocking(
194  k, m, n, num_threads);
195  bm = blocking.mc();
196  bn = blocking.nc();
197  bk = blocking.kc();
198  }
199 
200  // Number of kernels for each dimension.
201  Index nm0 = numext::div_ceil(m, bm);
202  Index nn0 = numext::div_ceil(n, bn);
203  Index nk = numext::div_ceil(k, bk);
204 
205  // Calculate task grain size (number of kernels executed per task).
206  // This task size coarsening serves two purposes:
207  // 1. It reduces per-task overheads including synchronization overheads.
208  // 2. It allows better cache reuse (the same packed rhs is reused by several
209  // consecutive kernels).
210  Index gm = 1;
211  Index gn = 1;
212  // If we are sharding by column, then we prefer to reduce rows first.
213  if (shard_by_col) {
214  gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col);
215  gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col);
216  } else {
217  gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col);
218  gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col);
219  }
220  // Number of tasks in each dimension.
221  Index nm = numext::div_ceil(nm0, gm);
222  Index nn = numext::div_ceil(nn0, gn);
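  // Worked example (added note, illustrative numbers): with m = 1024, n = 512,
  // bm = 128, bn = 64 we get nm0 = div_ceil(1024, 128) = 8 and
  // nn0 = div_ceil(512, 64) = 8 kernels per k slice. If coarsening picks
  // gm = 2 and gn = 1, then nm = div_ceil(8, 2) = 4 and nn = 8, i.e. 32 tasks
  // per k slice instead of 64 individual kernels.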
223 
224  // If there is enough concurrency in the sharding dimension, we choose not
225  // to parallelize by the other dimension, and execute all kernels in sync
226  // mode. This reduces parallelism from the nm x nn down to nn
227  // (shard_by_col==true) or nm (shard_by_col==false).
228  const Index sharding_dim_tasks = shard_by_col ? nn : nm;
229  const int num_worker_threads = this->m_device.numThreadsInPool();
230 
231  // With a small number of threads we want to make sure that we do not reduce
232  // parallelism too much. With a large number of threads we trade maximum
233  // parallelism for better memory locality.
234  const float oversharding_factor = num_worker_threads <= 4 ? 8.0
235  : num_worker_threads <= 8 ? 4.0
236  : num_worker_threads <= 16 ? 2.0
237  : num_worker_threads <= 32 ? 1.0
238  : num_worker_threads <= 64 ? 0.8
239  : /* num_worker_threads > 64 */ 0.6;
240 
241  const bool parallelize_by_sharding_dim_only = sharding_dim_tasks >= oversharding_factor * num_worker_threads;
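  // Worked example (added note): with 16 worker threads the oversharding
  // factor is 2.0, so we parallelize by the sharding dimension only when that
  // dimension has at least 2.0 * 16 = 32 tasks (e.g. nn >= 32 when
  // shard_by_col == true).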
242 
243  // Last but not least, decide whether we want to issue both lhs and rhs
244  // packing in parallel; or issue lhs packing first, and then issue rhs
245  // packing when lhs packing completes (for !shard_by_col lhs and rhs are
246  // swapped). Parallel packing allows more parallelism (for both packing and
247  // kernels), while sequential packing provides better locality (once
248  // a thread finishes rhs packing it proceeds to kernels with that rhs).
249  // First, we are interested in parallel packing if there are few tasks.
250  bool parallel_pack = num_threads >= nm * nn;
251  // Also do parallel packing if all data fits into L2$.
252  if (m * bk * Index(sizeof(LhsScalar)) + n * bk * Index(sizeof(RhsScalar)) <= l2CacheSize() * num_threads)
253  parallel_pack = true;
254  // But don't do it if we will use each rhs only once. Locality seems to be
255  // more important in this case.
256  if ((shard_by_col ? nm : nn) == 1) parallel_pack = false;
257  // Also don't get in the way of parallelize_by_sharding_dim_only
258  // optimization.
259  if (parallelize_by_sharding_dim_only) parallel_pack = false;
260 
261  // TODO(ezhulenev): With if constexpr we don't need SyncEvalParallelContext.
262  if (IsEvalInSyncMode) {
263 #define CONTEXT_ARGS \
264  (this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
265  parallelize_by_sharding_dim_only, NoCallback()) \
266  .run()
267  TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, CONTEXT_ARGS);
268 #undef CONTEXT_ARGS
269 
270  } else {
271 #define CONTEXT_ARGS \
272  (this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, nn0, shard_by_col, parallel_pack, \
273  parallelize_by_sharding_dim_only, std::move(done))
274  TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, Alignment, CONTEXT_ARGS, run());
275 #undef CONTEXT_ARGS
276  }
277  }
278 
279  // ------------------------------------------------------------------------ //
280 
281  // Dummy struct to represent an empty DoneCallback.
282 
283  struct NoCallback {
284  void operator()() { eigen_assert(false && "NoCallback should never be called"); }
285  };
286 
287  // ------------------------------------------------------------------------ //
288 
289  template <typename DoneCallback, typename Context>
290  class EvalParallelNotification;
291 
292  // Synchronous evaluation notification that blocks caller thread in Wait().
293  template <typename Context>
294  class EvalParallelNotification<NoCallback, Context> {
295  public:
296  EvalParallelNotification(Context*, NoCallback) {}
297  void Notify() { done_.Notify(); }
298  void Wait() { done_.Wait(); }
299 
300  private:
301  Eigen::Notification done_;
302  };
303 
304  // Asynchronous evaluation notification that does not block in Wait().
305  template <typename DoneCallback, typename Context>
306  class EvalParallelNotification {
307  public:
308  EvalParallelNotification(Context* ctx, DoneCallback done) : ctx_(ctx), done_(std::move(done)) {}
309 
310  void Notify() {
311  // Make a copy of the done callback, because it will be destructed when we
312  // delete the context below (EvalParallelNotification is a data member of
313  // the EvalParallelContext class).
314  DoneCallback done_copy = std::move(done_);
315 
316  // Delete parallel evaluation context.
317  delete ctx_;
318 
319  // Now safely call the done callback.
320  done_copy();
321  }
322 
323  void Wait() {}
324 
325  private:
326  Context* ctx_;
327  DoneCallback done_;
328  };
329 
330  // Context orchestrates sync/async parallel contraction evaluation. When it is
331  // executed in asynchronous mode, it owns all the shared state that might be
332  // accessible by block packing and kernel tasks.
333 
334  template <typename DoneCallback, bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous,
335  bool rhs_inner_dim_reordered, int Alignment>
336  class EvalParallelContext {
337  public:
338  typedef internal::TensorContractionInputMapper<LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t,
339  contract_t, internal::packet_traits<LhsScalar>::size,
340  lhs_inner_dim_contiguous, false, Unaligned>
341  LhsMapper;
342  typedef internal::TensorContractionInputMapper<RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t,
343  contract_t, internal::packet_traits<RhsScalar>::size,
344  rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Unaligned>
345  RhsMapper;
346 
347  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
348 
349  typedef internal::TensorContractionKernel<Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper>
350  TensorContractionKernel;
351 
352  typedef typename TensorContractionKernel::LhsBlock LhsBlock;
353  typedef typename TensorContractionKernel::RhsBlock RhsBlock;
354  typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle;
355 
356  EvalParallelContext(const Self* self, int num_threads, Scalar* buffer, Index tm, Index tn, Index tk, Index bm,
357  Index bn, Index bk, Index nm, Index nn, Index nk, Index gm, Index gn, Index nm0, Index nn0,
358  bool shard_by_col, bool parallel_pack, bool parallelize_by_sharding_dim_only, DoneCallback done)
359  : created_by_thread_id_(std::this_thread::get_id()),
360  done_(this, std::move(done)),
361  device_(self->m_device),
362  lhs_(self->m_leftImpl, self->m_left_nocontract_strides, self->m_i_strides, self->m_left_contracting_strides,
363  self->m_k_strides),
364  rhs_(self->m_rightImpl, self->m_right_nocontract_strides, self->m_j_strides,
365  self->m_right_contracting_strides, self->m_k_strides),
366  buffer_(buffer),
367  output_(buffer, tm),
368  output_kernel_(self->m_output_kernel),
369  tensor_contraction_params_(self->m_tensor_contraction_params),
370  num_threads_(num_threads),
371  shard_by_col_(shard_by_col),
372  parallel_pack_(parallel_pack),
373  parallelize_by_sharding_dim_only_(parallelize_by_sharding_dim_only),
374  m_(tm),
375  n_(tn),
376  k_(tk),
377  bm_(bm),
378  bn_(bn),
379  bk_(bk),
380  nm_(nm),
381  nn_(nn),
382  nk_(nk),
383  gm_(gm),
384  gn_(gn),
385  nm0_(nm0),
386  nn0_(nn0),
387  kernel_(m_, k_, n_, bm_, bk_, bn_),
388  num_thread_local_allocations_(0),
389  // We reserve 2X more capacity for thread local values than the
390  // number of threads in the pool to efficiently handle task stealing
391  // by threads that are not managed by the pool.
392  thread_local_capacity(2 * (parallelize_by_sharding_dim_only_ ? device_.numThreadsInPool() : 0)),
393  // We will use only one of the Lhs/Rhs thread local storages, depending
394  // on the shard_by_col value, and only when we parallelize by the sharding dim.
395  lhs_thread_local_blocks_(shard_by_col_ ? 0 : thread_local_capacity, {*this}, {*this}),
396  rhs_thread_local_blocks_(shard_by_col_ ? thread_local_capacity : 0, {*this}, {*this}) {
397  // These two options are mutually exclusive.
398  eigen_assert(!(parallel_pack && parallelize_by_sharding_dim_only));
399 
400  for (Index x = 0; x < P; x++) {
401  // Normal number of notifications for k slice switch is
402  // nm_ + nn_ + nm_ * nn_. However, the first P - 1 slices will receive only
403  // nm_ + nn_ notifications, because they will not receive notifications
404  // from preceding kernels.
405  state_switch_[x] =
406  x == 0 ? 1 : (parallel_pack_ ? nn_ + nm_ : (shard_by_col_ ? nn_ : nm_)) + (x == P - 1 ? nm_ * nn_ : 0);
407  state_packing_ready_[x] = parallel_pack_ ? 0 : (shard_by_col_ ? nm_ : nn_);
408  state_kernel_[x] = new std::atomic<uint8_t>*[nm_];
409  for (Index m = 0; m < nm_; m++) {
410  state_kernel_[x][m] = new std::atomic<uint8_t>[nn_];
411  // Kernels generally receive 3 notifications (previous kernel + 2
412  // packing), but the first slice won't get notifications from previous
413  // kernels.
414  for (Index n = 0; n < nn_; n++)
415  state_kernel_[x][m][n].store((x == 0 ? 0 : 1) + (parallel_pack_ ? 2 : 1), std::memory_order_relaxed);
416  }
417  }
418 
419  // Allocate memory for packed rhs/lhs matrices.
420  packed_mem_ = kernel_.allocateSlices( //
421  device_, //
422  /*num_lhs=*/nm0_, //
423  /*num_rhs=*/nn0_, //
424  /*num_slices=*/std::min<Index>(nk_, P - 1), //
425  packed_lhs_, packed_rhs_);
426 
427  if (parallelize_by_sharding_dim_only_) {
428  const int num_worker_threads = device_.numThreadsInPool();
429 
430  if (shard_by_col) {
431  can_use_thread_local_packed_ = new std::atomic<bool>[nn_];
432  for (int i = 0; i < nn_; ++i) can_use_thread_local_packed_[i].store(true, std::memory_order_relaxed);
433 
434  Index num_blocks = num_worker_threads * gn_;
435  thread_local_pre_alocated_mem_ = kernel_.allocateSlices( //
436  device_, //
437  /*num_lhs=*/0, //
438  /*num_rhs=*/num_blocks, //
439  /*num_slices=*/1, //
440  /*lhs_blocks=*/nullptr, &rhs_thread_local_pre_allocated_);
441 
442  } else {
443  can_use_thread_local_packed_ = new std::atomic<bool>[nm_];
444  for (int i = 0; i < nm_; ++i) can_use_thread_local_packed_[i].store(true, std::memory_order_relaxed);
445 
446  Index num_blocks = num_worker_threads * gm_;
447  thread_local_pre_alocated_mem_ = kernel_.allocateSlices( //
448  device_, //
449  /*num_lhs=*/num_blocks, //
450  /*num_rhs=*/0, //
451  /*num_slices=*/1, &lhs_thread_local_pre_allocated_, //
452  /*rhs_blocks=*/nullptr);
453  }
454  }
455  }
456 
457  ~EvalParallelContext() {
458  for (Index x = 0; x < P; x++) {
459  for (Index m = 0; m < nm_; m++) delete[] state_kernel_[x][m];
460  delete[] state_kernel_[x];
461  }
462  kernel_.deallocate(device_, packed_mem_);
463  if (parallelize_by_sharding_dim_only_) {
464  kernel_.deallocate(device_, thread_local_pre_alocated_mem_);
465  delete[] can_use_thread_local_packed_;
466  }
467  }
468 
469  void run() {
470  // Kick off packing of the first slice.
471  signal_switch(0, 1);
472 
473  // Wait for overall completion.
474  //
475  // If parallel evaluation is executed in async mode, this is a no-op, and
476  // Wait() will return immediately. In synchronous mode it will block the
477  // caller thread until it receives a notification from the last task.
478  //
479  // In async mode, the last task, when completed, will call the done callback
480  // from the same thread and will delete this context.
481  //
482  // TODO(dvyukov): This wait can lead to deadlock if contraction is
483  // evaluated in synchronous mode. If nthreads contractions are
484  // concurrently submitted from worker threads, this wait will block all
485  // worker threads and the system will deadlock.
486  done_.Wait();
487  }
488 
489  private:
490  std::thread::id created_by_thread_id_;
491 
492  // This notification is specialized on the type of DoneCallback and can be
493  // blocking or non-blocking.
494  EvalParallelNotification<DoneCallback, EvalParallelContext> done_;
495 
496  const Device& device_;
497  LhsMapper lhs_;
498  RhsMapper rhs_;
499  Scalar* const buffer_;
500  OutputMapper output_;
501  OutputKernelType output_kernel_;
502  TensorContractionParams tensor_contraction_params_;
503  const int num_threads_;
504  const bool shard_by_col_;
505  const bool parallel_pack_;
506  const bool parallelize_by_sharding_dim_only_;
507  // Matrix sizes.
508  const Index m_;
509  const Index n_;
510  const Index k_;
511  // Block sizes.
512  const Index bm_;
513  const Index bn_;
514  const Index bk_;
515  // Number of tasks.
516  const Index nm_;
517  const Index nn_;
518  const Index nk_;
519  // Task grain sizes (number of kernels executed per task).
520  const Index gm_;
521  const Index gn_;
522  // Number of blocks (this is different from nm_/nn_ because of task size
523  // coarsening).
524  const Index nm0_;
525  const Index nn0_;
526  // Tensor contraction kernel.
527  TensorContractionKernel kernel_;
528 
529  // Parallelization strategy.
530  //
531  // Blocks related to the same k block can run in parallel because they write
532  // to different output blocks. So we parallelize within k slices, this
533  // gives us parallelism level of m x n. Before we can start any kernels
534  // related to k-th slice, we need to issue m lhs packing tasks and n rhs
535  // packing tasks.
536  //
537  // However, there is a bottleneck when we are finishing kernels for k-th
538  // slice (at the very end there is only 1 runnable kernel). To mitigate this
539  // bottleneck we allow kernels from k-th and k+1-th slices to run in
540  // parallel. Note that (m, n, k) and (m, n, k+1) kernels write to the same
541  // output block, so they must not run in parallel.
542  //
543  // This gives us the following dependency graph.
544  // On each k slice we have m x n kernel tasks, m lhs packing tasks and n rhs
545  // packing tasks.
546  // Kernel (m, n, k) can start when:
547  // - kernel (m, n, k-1) has finished
548  // - lhs packing (m, k) has finished
549  // - rhs packing (n, k) has finished
550  // Lhs/rhs packing can start when:
551  // - all packing for slice k-1 has finished (artificially imposed to limit
552  // the amount of parallel packing)
553  //
554  // On top of that we limit runnable tasks to two consecutive k slices.
555  // This is done to limit amount of memory we need for packed lhs/rhs
556  // (for each k slice we need m*bk + n*bk memory in packed_lhs_/packed_rhs_).
557  //
558  // state_switch_ tracks when we are ready to switch to the next k slice.
559  // state_kernel_[m][n] tracks when we are ready to kick off kernel (m, n).
560  // These variables roll over 3 consecutive k slices: the first two are being
561  // actively executed, plus one to track completion of kernels in the second
562  // slice.
563  static constexpr Index P = 3;
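  // Added note: counters for slice k therefore live at index k % 3 in
  // state_switch_/state_kernel_/state_packing_ready_, while packed blocks for
  // slice k live at index k % (P - 1) == k % 2 in packed_lhs_/packed_rhs_,
  // since at most two consecutive slices are ever packed at the same time.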
564 
565  // Handle to the allocated temporary storage for Lhs/Rhs blocks.
566  BlockMemHandle packed_mem_;
567  std::vector<LhsBlock> packed_lhs_[P - 1];
568  std::vector<RhsBlock> packed_rhs_[P - 1];
569 
570  // If we choose to parallelize only by the sharding dimension, each thread
571  // will have its own "thread local" (not a C++ thread_local storage) memory
572  // for packed_lhs or packed_rhs (shard_by_col = false or true). This memory
573  // can't be passed to a kernel that might execute on a different thread.
574  //
575  // In practice when we are ready to pack memory for the sharding dimension
576  // (rhs if shard_by_col==true) of the K-th slice, all kernels for the K-1 slice
577  // have already completed (99% of the time), so we can pack data into the thread
578  // local storage and guarantee that all the kernels will be executed
579  // immediately in the same thread. This significantly increases L1 cache hit
580  // ratio and reduces pressure on the memory bus.
581  //
582  // It's still possible that a kernel for the K-th slice will be ready before
583  // completion of the K-1 kernels, so we have to allocate "global" packed_lhs_
584  // and packed_rhs_ to allow kernels to be executed later on a thread
585  // different from the thread that was used for packing.
586 
587  // Handle for pre-allocated thread local memory buffers.
588  BlockMemHandle thread_local_pre_alocated_mem_;
589 
590  // Only one of these will be initialized depending on shard_by_col value
591  // (the size will be `num_worker_threads * num_grains_in_the_sharding_dim`).
592  std::vector<LhsBlock> lhs_thread_local_pre_allocated_;
593  std::vector<RhsBlock> rhs_thread_local_pre_allocated_;
594 
595  // How many thread local blocks were already allocated.
596  std::atomic<int> num_thread_local_allocations_;
597  const int thread_local_capacity;
598 
599  // We will use the pre-allocated Lhs/Rhs blocks defined above if the number of
600  // unique threads in the system is less than or equal to the number of threads
601  // in the thread pool. We fall back on dynamic memory allocation after that.
602 
603  // ThreadLocalBlocks is a container for Lhs or Rhs thread local buffers. Its
604  // size is equal to the grain size in Lhs/Rhs sharding dimension.
605  template <typename BlockType>
606  class ThreadLocalBlocks {
607  public:
608  ThreadLocalBlocks() = default;
609 
610  ThreadLocalBlocks(BlockType* base, size_t grain_size)
611  : is_pre_allocated_(true), thread_local_pre_allocated_base_(base), grain_size_(grain_size) {}
612 
613  ThreadLocalBlocks(BlockMemHandle mem_handle, std::vector<BlockType> blocks)
614  : is_pre_allocated_(false), mem_handle_(std::move(mem_handle)), blocks_(std::move(blocks)) {}
615 
616  BlockType& block(int grain_index) {
617  eigen_assert(grain_index >= 0);
618  eigen_assert(static_cast<size_t>(grain_index) < size());
619  return is_pre_allocated_ ? thread_local_pre_allocated_base_[grain_index] : blocks_[grain_index];
620  }
621 
622  void Release(EvalParallelContext& ctx) const {
623  if (!is_pre_allocated_) {
624  ctx.kernel_.deallocate(ctx.device_, mem_handle_);
625  }
626  }
627 
628  size_t size() const { return is_pre_allocated_ ? grain_size_ : blocks_.size(); }
629 
630  private:
631  bool is_pre_allocated_;
632 
633  // Reuse pre-allocated thread local buffers.
634  BlockType* thread_local_pre_allocated_base_ = nullptr;
635  size_t grain_size_ = 0;
636 
637  // These will be initialized only if `is_pre_allocated == false`.
638  BlockMemHandle mem_handle_{};
639  std::vector<BlockType> blocks_;
640  };
641 
642  // ThreadLocalBlocksInitialize callable does custom thread local blocks
643  // initialization, and will reuse pre-allocated buffers if possible, or will
644  // dynamically allocate new memory.
645  //
646  // Lhs/Rhs blocks might be of the same type, so we have to pass explicitly
647  // for which side we plan to do block allocation.
648  template <typename BlockType, bool is_rhs>
649  class ThreadLocalBlocksInitialize {
650  static constexpr bool kIsLhs = !is_rhs && std::is_same<BlockType, LhsBlock>::value;
651  static const bool kIsRhs = is_rhs && std::is_same<BlockType, RhsBlock>::value;
652  static_assert(kIsLhs || kIsRhs, "Unknown block type");
653 
654  using Blocks = ThreadLocalBlocks<BlockType>;
655 
656  public:
657  ThreadLocalBlocksInitialize(EvalParallelContext& ctx)
658  : ctx_(ctx), num_worker_threads_(ctx_.device_.numThreadsInPool()) {}
659 
660  void operator()(Blocks& blocks) {
661  const int n = ctx_.num_thread_local_allocations_.fetch_add(1, std::memory_order_relaxed);
662 
663  if (n >= num_worker_threads_) {
664  ThreadLocalBlocksAllocator<is_rhs>::allocate(ctx_, blocks);
665  } else {
666  ThreadLocalBlocksAllocator<is_rhs>::reuse(ctx_, n, blocks);
667  }
668  }
669 
670  private:
671  // NOTE(ezhulenev): Without 'if constexpr' we have to put calls to
672  // TensorContractionKernel::allocateSlices into template specializations.
673  // Also explicit specializations are not allowed at class scope in C++03,
674  // EvalCtx type parameter is just a workaround for that limitation.
675  template <bool pack_rhs, typename EvalCtx = EvalParallelContext>
676  struct ThreadLocalBlocksAllocator;
677 
678  template <typename EvalCtx>
679  struct ThreadLocalBlocksAllocator</*pack_rhs=*/true, EvalCtx> {
680  static void allocate(EvalCtx& ctx, Blocks& blocks) {
681  std::vector<RhsBlock> rhs_blocks;
682  BlockMemHandle mem_handle = ctx.kernel_.allocateSlices(ctx.device_,
683  /*num_lhs=*/0,
684  /*num_rhs=*/ctx.gn_,
685  /*num_slices=*/1,
686  /*lhs_blocks=*/nullptr, /*rhs_blocks=*/&rhs_blocks);
687 
688  blocks = ThreadLocalBlocks<RhsBlock>(std::move(mem_handle), std::move(rhs_blocks));
689  }
690 
691  static void reuse(EvalCtx& ctx, int index, Blocks& blocks) {
692  RhsBlock* ptr = &ctx.rhs_thread_local_pre_allocated_[ctx.gn_ * index];
693  blocks = ThreadLocalBlocks<RhsBlock>(ptr, ctx.gn_);
694  }
695  };
696 
697  template <typename EvalCtx>
698  struct ThreadLocalBlocksAllocator</*pack_rhs=*/false, EvalCtx> {
699  static void allocate(EvalCtx& ctx, Blocks& blocks) {
700  std::vector<LhsBlock> lhs_blocks;
701  BlockMemHandle mem_handle = ctx.kernel_.allocateSlices(ctx.device_,
702  /*num_lhs=*/ctx.gm_,
703  /*num_rhs=*/0,
704  /*num_slices=*/1,
705  /*lhs_blocks=*/&lhs_blocks, /*rhs_blocks=*/nullptr);
706 
707  blocks = ThreadLocalBlocks<LhsBlock>(std::move(mem_handle), std::move(lhs_blocks));
708  }
709 
710  static void reuse(EvalCtx& ctx, int index, Blocks& blocks) {
711  LhsBlock* ptr = &ctx.lhs_thread_local_pre_allocated_[ctx.gm_ * index];
712  blocks = ThreadLocalBlocks<LhsBlock>(ptr, ctx.gm_);
713  }
714  };
715 
716  EvalParallelContext& ctx_;
717  const int num_worker_threads_;
718  };
719 
720  template <typename BlockType>
721  class ThreadLocalBlocksRelease {
722  public:
723  using Blocks = ThreadLocalBlocks<BlockType>;
724  ThreadLocalBlocksRelease(EvalParallelContext& ctx) : ctx_(ctx) {}
725  void operator()(Blocks& blocks) { blocks.Release(ctx_); }
726 
727  private:
728  EvalParallelContext& ctx_;
729  };
730 
731  // ThreadLocalBlocks initialization callables.
732  using ThreadLocalLhsInit = ThreadLocalBlocksInitialize<LhsBlock, /*is_rhs=*/false>;
733  using ThreadLocalRhsInit = ThreadLocalBlocksInitialize<RhsBlock, /*is_rhs=*/true>;
734 
735  // ThreadLocalBlocks release callables.
736  using ThreadLocalLhsRelease = ThreadLocalBlocksRelease<LhsBlock>;
737  using ThreadLocalRhsRelease = ThreadLocalBlocksRelease<RhsBlock>;
738 
739  // Thread local containers for Lhs/Rhs block packs. In practice only one of
740  // them will be used, depending on the shard_by_col value.
741  Eigen::ThreadLocal<ThreadLocalBlocks<LhsBlock>, ThreadLocalLhsInit, ThreadLocalLhsRelease> lhs_thread_local_blocks_;
742  Eigen::ThreadLocal<ThreadLocalBlocks<RhsBlock>, ThreadLocalRhsInit, ThreadLocalRhsRelease> rhs_thread_local_blocks_;
743 
744  // After a particular shard of the K-th slice misses the thread local execution
745  // opportunity (the K-1 slice didn't complete its kernels), we can no
746  // longer schedule the K+1 and following slices in thread local mode, because
747  // there is no longer a guarantee that previous kernels were executed
748  // sequentially in the same thread (the array size is nn_ or nm_).
749  std::atomic<bool>* can_use_thread_local_packed_;
750 
751  std::atomic<uint8_t>** state_kernel_[P];
752  // state_switch_ is frequently modified by worker threads, while other
753  // fields are read-only after constructor. Let's move it to a separate cache
754  // line to reduce cache-coherency traffic.
755  char pad_[128];
756  std::atomic<Index> state_packing_ready_[P];
757  std::atomic<Index> state_switch_[P];
758 
759  LhsBlock& packed_lhs(Index m, Index k, Index m1, bool use_thread_local) {
760  if (use_thread_local) {
761  eigen_assert(!shard_by_col_);
762  ThreadLocalBlocks<LhsBlock>& blocks = lhs_thread_local_blocks_.local();
763 
764  Index grain_index = m1 - m * gm_;
765  return blocks.block(
766  internal::convert_index<int>(grain_index)); // FIXME better make ThreadLocalBlocks use Eigen::Index?
767  } else {
768  return packed_lhs_[k % (P - 1)][m1];
769  }
770  }
771 
772  RhsBlock& packed_rhs(Index n, Index k, Index n1, bool use_thread_local) {
773  if (use_thread_local) {
774  eigen_assert(shard_by_col_);
775  ThreadLocalBlocks<RhsBlock>& blocks = rhs_thread_local_blocks_.local();
776 
777  Index grain_index = n1 - n * gn_;
778  return blocks.block(
779  internal::convert_index<int>(grain_index)); // FIXME better make ThreadLocalBlocks use Eigen::Index?
780  } else {
781  return packed_rhs_[k % (P - 1)][n1];
782  }
783  }
784 
785  // In the following two methods (pack_lhs and pack_rhs), if we know for sure
786  // that we'll be able to call a kernel with the packed data immediately,
787  // without submitting it to the thread pool, we can use thread local memory
788  // for the packed data.
789  //
790  // We can only reliably check it if we are running all kernels in sync mode
791  // (parallelize only by sharding dim). If kernel for m==0 (n==0) is ready to
792  // run, it's guaranteed that all kernels with larger values of m (n) are
793  // also ready, because we execute them in the same order for all K slices.
794 
795  void pack_lhs(Index m, Index k) {
796  bool use_thread_local = false;
797 
798  if (parallelize_by_sharding_dim_only_ && !shard_by_col_ &&
799  can_use_thread_local_packed_[m].load(std::memory_order_relaxed)) {
800  if (state_kernel_[k % P][m][0].load(std::memory_order_relaxed) == 1) {
801  use_thread_local = true;
802  } else {
803  // If we can't guarantee that all kernels in the `k` slice will be
804  // executed sequentially in the current thread, it's no longer safe to use
805  // thread local memory in the following slices along the k dimension.
806  eigen_assert(k > 0);
807  can_use_thread_local_packed_[m].store(false, std::memory_order_relaxed);
808  }
809  }
810 
811  const Index mend = m * gm_ + gm(m);
812  for (Index m1 = m * gm_; m1 < mend; m1++)
813  kernel_.packLhs(&packed_lhs(m, k, m1, use_thread_local), lhs_.getSubMapper(m1 * bm_, k * bk_), bk(k), bm(m1));
814 
815  if (!parallel_pack_ && shard_by_col_) {
816  eigen_assert(!use_thread_local);
817  signal_packing(k);
818  } else {
819  signal_switch(k + 1);
820  for (Index n = nn_ - 1; n >= 0; n--) {
821  bool sync = parallelize_by_sharding_dim_only_ || n == 0;
822  signal_kernel(m, n, k, sync, use_thread_local);
823  }
824  }
825  }
826 
827  void pack_rhs(Index n, Index k) {
828  bool use_thread_local = false;
829 
830  if (parallelize_by_sharding_dim_only_ && shard_by_col_ &&
831  can_use_thread_local_packed_[n].load(std::memory_order_relaxed)) {
832  if (state_kernel_[k % P][0][n].load(std::memory_order_relaxed) == 1) {
833  use_thread_local = true;
834  } else {
835  // If we can't guarantee that all kernels in the `k` slice will be
836  // executed sequentially in the current thread, it's no longer safe to use
837  // thread local memory in the following slices along the k dimension.
838  eigen_assert(k > 0);
839  can_use_thread_local_packed_[n].store(false, std::memory_order_relaxed);
840  }
841  }
842 
843  const Index nend = n * gn_ + gn(n);
844  for (Index n1 = n * gn_; n1 < nend; n1++) {
845  if (!TensorContractionKernel::HasBeta && k == 0) {
846  // Zero the output memory in parallel, only if contraction kernel does
847  // not support `beta`. Otherwise we will pass beta 0.0 to the first
848  // call to the `TensorContractionKernel::invoke()`.
849  //
850  // On a 10000x2x10000 mm, zeroing can easily take half of the time. Zero (bn
851  // x m) row. Safe to do here because all kernels that will write to
852  // this memory depend on completion of this task. Note: don't call
853  // device_.fill() here. device_.fill() blocks on thread pool
854  // worker thread, which can lead to underutilization and deadlocks.
855  std::fill_n(buffer_ + n1 * bn_ * m_, bn(n1) * m_, Scalar(0));
856  }
857  kernel_.packRhs(&packed_rhs(n, k, n1, use_thread_local), rhs_.getSubMapper(k * bk_, n1 * bn_), bk(k), bn(n1));
858  }
859 
860  if (parallel_pack_ || shard_by_col_) {
861  signal_switch(k + 1);
862  for (Index m = nm_ - 1; m >= 0; m--) {
863  bool sync = parallelize_by_sharding_dim_only_ || m == 0;
864  signal_kernel(m, n, k, sync, use_thread_local);
865  }
866  } else {
867  eigen_assert(!use_thread_local);
868  signal_packing(k);
869  }
870  }
871 
872  void kernel(Index m, Index n, Index k, bool use_thread_local) {
873  // Note: order of iteration matters here. Iteration over m is innermost
874  // because we want to reuse the same packed rhs in consecutive tasks
875  // (rhs fits into L2$ while lhs only into L3$).
876  const Index nend = n * gn_ + gn(n);
877  const Index mend = m * gm_ + gm(m);
878 
879  // NOTE: output = alpha * LHS * RHS + beta * output.
880  const Scalar alpha = Scalar(1);
881  const Scalar beta = (TensorContractionKernel::HasBeta && k == 0) ? Scalar(0) : Scalar(1);
882 
883  if (shard_by_col_) {
884  for (Index n1 = n * gn_; n1 < nend; n1++) {
885  for (Index m1 = m * gm_; m1 < mend; m1++) {
886  const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_);
887  kernel_.invoke(output_mapper, packed_lhs(m, k, m1, !shard_by_col_ && use_thread_local),
888  packed_rhs(n, k, n1, shard_by_col_ && use_thread_local), bm(m1), bk(k), bn(n1), alpha, beta);
889 
890  // We are done with the last task for the [m1, n1] block.
891  if (k + 1 == nk_) {
892  output_kernel_(output_mapper, tensor_contraction_params_, m1 * bm_, n1 * bn_, bm(m1), bn(n1));
893  }
894  }
895  }
896  } else {
897  for (Index m1 = m * gm_; m1 < mend; m1++)
898  for (Index n1 = n * gn_; n1 < nend; n1++) {
899  const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_);
900  kernel_.invoke(output_mapper, packed_lhs(m, k, m1, !shard_by_col_ && use_thread_local),
901  packed_rhs(n, k, n1, shard_by_col_ && use_thread_local), bm(m1), bk(k), bn(n1), alpha, beta);
902 
903  // We are done with the last task for the [m1, n1] block.
904  if (k + 1 == nk_) {
905  output_kernel_(output_mapper, tensor_contraction_params_, m1 * bm_, n1 * bn_, bm(m1), bn(n1));
906  }
907  }
908  }
909  signal_kernel(m, n, k + 1, /*sync=*/false, /*use_thread_local=*/false);
910  signal_switch(k + 2);
911  }
912 
913  void signal_packing(Index k) {
914  eigen_assert(!parallel_pack_);
915  Index s = state_packing_ready_[k % P].fetch_sub(1);
916  eigen_assert(s > 0);
917  if (s != 1) return;
918  state_packing_ready_[k % P] = shard_by_col_ ? nm_ : nn_;
919  enqueue_packing(k, shard_by_col_);
920  }
921 
922  void signal_kernel(Index m, Index n, Index k, bool sync, bool use_thread_local) {
923  std::atomic<uint8_t>* state = &state_kernel_[k % P][m][n];
924  Index s = state->load();
925  eigen_assert(s > 0);
926  if (s != 1 && state->fetch_sub(1) != 1) {
927  eigen_assert(!use_thread_local);
928  return;
929  }
930  state->store(parallel_pack_ ? 3 : 2, std::memory_order_relaxed);
931  if (sync) {
932  kernel(m, n, k, use_thread_local);
933  } else {
934  eigen_assert(!use_thread_local);
935  device_.enqueueNoNotification([this, m, n, k, use_thread_local]() {
936  kernel(m, n, k, use_thread_local);
937  });
938  }
939  }
940 
941  void signal_switch(Index k, Index v = 1) {
942  Index s = state_switch_[k % P].fetch_sub(v);
943  eigen_assert(s >= v);
944  if (s != v) return;
945 
946  // Ready to switch to the next k slice.
947  // Reset counter for the next iteration.
948  state_switch_[k % P] = (parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)) + nm_ * nn_;
949  if (k < nk_) {
950  // Issue lhs/rhs packing. Their completion will in turn kick off
951  // kernels.
952  if (parallel_pack_) {
953  enqueue_packing(k, !shard_by_col_);
954  enqueue_packing(k, shard_by_col_);
955  } else if (shard_by_col_) {
956  enqueue_packing(k, false);
957  } else {
958  enqueue_packing(k, true);
959  }
960 
961  // Termination handling.
962  // Because kernel completion signals k + 2 switch, we need to finish nk
963  // + 2 slices without issuing any tasks on nk + 1 slice. So here we
964  // pretend that all nk + 1 packing tasks just finish instantly; so that
965  // nk + 2 switch only waits for completion of nk kernels.
966  } else if (k == nk_) {
967  signal_switch(k + 1, parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_));
968  } else {
969  done_.Notify();
970  }
971  }
972 
973  // Enqueue all rhs/lhs packing for k-th slice.
974  void enqueue_packing(Index k, bool rhs) { enqueue_packing_helper(0, rhs ? nn_ : nm_, k, rhs); }
975 
976  void enqueue_packing_helper(Index start, Index end, Index k, bool rhs) {
977  if (end - start == 1) {
978  if (rhs)
979  pack_rhs(start, k);
980  else
981  pack_lhs(start, k);
982  } else {
983  while (end - start > 1) {
984  Index mid = (start + end) / 2;
985  device_.enqueueNoNotification([this, mid, end, k, rhs]() {
986  enqueue_packing_helper(mid, end, k, rhs);
987  });
988  end = mid;
989  }
990 
991  // Decide whether we want to run the first packing task (start == 0) in
992  // async mode when we parallelize only by the sharding dim:
993  // (1) pack_lhs and pack_rhs call signal_switch before completing
994  // all calls to signal_kernel, which in sync mode might lead
995  // to the execution of the first kernel of the k+1 slice, before
996  // completing a call to the last kernel of the k slice.
997  // (2) all pack tasks for sharded dim must be executed in a thread
998  // pool to get pre-allocated thread local buffers.
999  bool pack_async = (start == 0) && (parallelize_by_sharding_dim_only_ && shard_by_col_ == rhs) &&
1000  (k > 0 || std::this_thread::get_id() == created_by_thread_id_);
1001 
1002  if (pack_async) {
1003  device_.enqueueNoNotification([this, start, end, k, rhs]() {
1004  enqueue_packing_helper(start, end, k, rhs);
1005  });
1006  } else {
1007  enqueue_packing_helper(start, end, k, rhs);
1008  }
1009  }
1010  }
1011 
1012  // Block sizes, accounting for a potentially incomplete last block.
1013  Index bm(Index m) const { return m + 1 < nm0_ ? bm_ : m_ + bm_ - bm_ * nm0_; }
1014  Index bn(Index n) const { return n + 1 < nn0_ ? bn_ : n_ + bn_ - bn_ * nn0_; }
1015  Index bk(Index k) const { return k + 1 < nk_ ? bk_ : k_ + bk_ - bk_ * nk_; }
1016  // Task grain sizes, accounting for a potentially incomplete last task.
1017  Index gm(Index m) const { return m + 1 < nm_ ? gm_ : nm0_ + gm_ - gm_ * nm_; }
1018  Index gn(Index n) const { return n + 1 < nn_ ? gn_ : nn0_ + gn_ - gn_ * nn_; }
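  // Worked example (added note): with m_ = 100 and bm_ = 32 there are
  // nm0_ = 4 blocks and the last one has bm(3) = 100 + 32 - 32 * 4 = 4 rows.
  // Similarly, with nm0_ = 4 kernels and gm_ = 3 there are nm_ = 2 tasks and
  // the last task covers gm(1) = 4 + 3 - 3 * 2 = 1 kernel.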
1019 
1020  EvalParallelContext(const EvalParallelContext&) = delete;
1021  void operator=(const EvalParallelContext&) = delete;
1022  };
1023 
1024  template <bool lhs_inner_dim_contiguous, bool rhs_inner_dim_contiguous, bool rhs_inner_dim_reordered, int Alignment>
1025  using SyncEvalParallelContext = EvalParallelContext<NoCallback, lhs_inner_dim_contiguous, rhs_inner_dim_contiguous,
1026  rhs_inner_dim_reordered, Alignment>;
1027 
1028  // ------------------------------------------------------------------------ //
1029 
1030  // EvalShardedByInnerDimContext orchestrates sync/async contraction
1031  // evaluation, when we shard by inner dimension. When it is executed in
1032  // asynchronous mode, it owns all the shared state that might be accessible by
1033  // block processing tasks.
1034 
1035  template <typename DoneCallback>
1036  struct EvalShardedByInnerDimContext {
1037  EvalShardedByInnerDimContext(const Self* self, int num_threads, Scalar* result_buffer, Index m_size, Index n_size,
1038  Index k_size, DoneCallback done_callback)
1039  : evaluator(self),
1040  m_lhs_inner_dim_contiguous(evaluator->m_lhs_inner_dim_contiguous),
1041  m_rhs_inner_dim_contiguous(evaluator->m_rhs_inner_dim_contiguous),
1042  m_rhs_inner_dim_reordered(evaluator->m_rhs_inner_dim_reordered),
1043  result(result_buffer),
1044  m(m_size),
1045  n(n_size),
1046  k(k_size),
1047  done(std::move(done_callback)),
1048  buffer_size_bytes(m * n * sizeof(Scalar)),
1049  block_size(blockSize(k, num_threads)),
1050  num_blocks(numext::div_ceil<Index>(k, block_size)),
1051  num_pending_blocks(internal::convert_index<int>(num_blocks)),
1052  l0_ranges(numext::div_ceil<Index>(num_blocks, l0_size)),
1053  l0_state(l0_ranges),
1054  block_buffers(num_blocks) {
1055  // Keep count of pending gemm tasks for each l0 range.
1056  for (int i = 0; i < l0_ranges; ++i) {
1057  const Index num_pending_tasks = actualRangeSize(l0_ranges, l0_size, i);
1058  l0_state.emplace_back(internal::convert_index<int>(num_pending_tasks));
1059  }
1060 
1061  // Allocate temporary buffers for each block.
1062  for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) {
1063  Scalar* buf = block_idx == 0 ? result : static_cast<Scalar*>(evaluator->m_device.allocate(buffer_size_bytes));
1064  block_buffers.emplace_back(buf);
1065  }
1066  }
1067 
1068  ~EvalShardedByInnerDimContext() {
1069  for (Index i = 1; i < num_blocks; ++i) {
1070  evaluator->m_device.deallocate(block_buffers[i]);
1071  }
1072  }
1073 
1074  template <int Alignment>
1075  void run() {
1076  Barrier barrier(internal::convert_index<int>(num_blocks));
1077  eval<Alignment>(barrier, 0, num_blocks);
1078  barrier.Wait();
1079 
1080  // Aggregate partial sums from l0 ranges.
1081  aggregateL0Blocks<Alignment>();
1082 
1083  // Apply output kernel.
1084  applyOutputKernel();
1085  }
1086 
1087  template <int Alignment>
1088  void runAsync() {
1089  evalAsync<Alignment>(0, num_blocks);
1090  }
1091 
1092  private:
1093  // The underlying GEMM kernel assumes that k is a multiple of
1094  // the packet size and subtle breakage occurs if this is violated.
1095  static const Index packet_size = internal::packet_traits<RhsScalar>::size;
1096 
1097  const Self* evaluator; // TensorContraction evaluator
1098 
1099  // These fields are required by the TENSOR_CONTRACTION_DISPATCH macro.
1100  bool m_lhs_inner_dim_contiguous;
1101  bool m_rhs_inner_dim_contiguous;
1102  bool m_rhs_inner_dim_reordered;
1103 
1104  Scalar* result;
1105 
1106  Index m;
1107  Index n;
1108  Index k;
1109 
1110  DoneCallback done;
1111 
1112  // ----------------------------------------------------------------------//
1113  // Algorithm parameters.
1114 
1115  // We will compute partial results into the buffers of this size.
1116  Index buffer_size_bytes;
1117 
1118  Index block_size;
1119  Index num_blocks;
1120 
1121  // Keep track of pending tasks when evaluating in async mode.
1122  std::atomic<int> num_pending_blocks;
1123 
1124  // We compute partial gemm results in parallel, and to get the final result
1125  // we need to add them all together. For a large number of threads (>= 48)
1126  // this adds a very expensive sequential step at the end.
1127  //
1128  // We split [0, num_blocks) into small ranges, and when a task for a
1129  // block finishes its partial gemm computation, it checks whether it was the
1130  // last gemm in the range, and if so, it adds up all blocks of the range.
1131  //
1132  // After all tasks are done, we only need to add these pre-aggregated blocks.
1133 
1134  // For now we use just a single level of ranges to compute pre-aggregated
1135  // partial sums, but in general we can use more layers to compute tree
1136  // aggregation in parallel and reduce the size of the sequential step.
1137  //
1138  // TODO(ezhulenev): Add multilevel tree aggregation? Probably will make
1139  // sense only if number of threads >= ~128?
1140  static const Index l0_size = 4;
1141  Index l0_ranges;
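  // Worked example (added note): with num_blocks = 10 and l0_size = 4 there
  // are l0_ranges = 3 ranges covering 4, 4 and 2 blocks. Each range
  // pre-aggregates into its first block (indices 0, 4 and 8), so the final
  // sequential step only combines these 3 buffers instead of all 10.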
1142 
1143  // Keep count of pending gemm tasks for each l0 range.
1144  MaxSizeVector<std::atomic<int>> l0_state; // [0, l0_ranges)
1145 
1146  // Buffers allocated for each temporary block computation.
1147  MaxSizeVector<Scalar*> block_buffers; // [0, num_blocks)
1148 
1149  template <int Alignment>
1150  void processBlock(Index block_idx, Index begin, Index end) {
1151  Scalar* buf = block_buffers[block_idx];
1152 
1153  TENSOR_CONTRACTION_DISPATCH(evaluator->template evalGemmPartialWithoutOutputKernel, Alignment,
1154  (buf, begin, end,
1155  /*num_threads=*/internal::convert_index<int>(num_blocks)));
1156 
1157  // Check if it was the last task in l0 range.
1158  const Index l0_index = block_idx / l0_size;
1159  const int v = l0_state[l0_index].fetch_sub(1);
1160  eigen_assert(v >= 1);
1161 
1162  // If we processed the last block of the range, we can aggregate all
1163  // partial results into the first block of the range.
1164  if (v == 1) {
1165  const Index rng_size = actualRangeSize(l0_ranges, l0_size, l0_index);
1166  const Index dst_block_idx = l0_index * l0_size;
1167 
1168  if (rng_size == l0_size) {
1169  addAllToBuffer<Alignment>(m * n,
1170  /*src_buf0=*/block_buffers[dst_block_idx + 1],
1171  /*src_buf1=*/block_buffers[dst_block_idx + 2],
1172  /*src_buf2=*/block_buffers[dst_block_idx + 3],
1173  /*dst_buf= */ block_buffers[dst_block_idx]);
1174  } else {
1175  // Aggregate blocks of potentially incomplete last range.
1176  for (int i = 1; i < rng_size; ++i) {
1177  addToBuffer<Alignment>(m * n,
1178  /*src_buf=*/block_buffers[dst_block_idx + i],
1179  /*dst_buf=*/block_buffers[dst_block_idx]);
1180  }
1181  }
1182  }
1183  }
1184 
1185  // Aggregate partial sums from l0 ranges.
1186  template <int Alignment>
1187  void aggregateL0Blocks() const {
1188  Index l0_index = 1;
1189 
1190  for (; l0_index + 2 < l0_ranges; l0_index += 3) {
1191  addAllToBuffer<Alignment>(m * n,
1192  /*src_buf0=*/block_buffers[(l0_index + 0) * l0_size],
1193  /*src_buf1=*/block_buffers[(l0_index + 1) * l0_size],
1194  /*src_buf2=*/block_buffers[(l0_index + 2) * l0_size],
1195  /*dst_buf= */ block_buffers[0]);
1196  }
1197 
1198  for (; l0_index < l0_ranges; ++l0_index) {
1199  addToBuffer<Alignment>(m * n, block_buffers[l0_index * l0_size], block_buffers[0]);
1200  }
1201  }
1202 
1203  void applyOutputKernel() const {
1204  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
1205  evaluator->m_output_kernel(OutputMapper(result, m), evaluator->m_tensor_contraction_params,
1206  static_cast<Eigen::Index>(0), static_cast<Eigen::Index>(0), m, n);
1207  }
1208 
1209  // Compute block size with accounting for potentially incomplete last block.
1210  Index actualBlockSize(Index block_idx) const {
1211  return block_idx + 1 < num_blocks ? block_size : k + block_size - block_size * num_blocks;
1212  };
1213 
1214  // Compute range size with accounting for potentially incomplete last range.
1215  Index actualRangeSize(Index num_ranges, Index range_size, Index range_idx) const {
1216  eigen_assert(range_idx < num_ranges);
1217  return range_idx + 1 < num_ranges ? range_size : num_blocks + range_size - range_size * num_ranges;
1218  };
1219 
1220  template <int Alignment>
1221  EIGEN_STRONG_INLINE static void addToBuffer(size_t n, const Scalar* src_buf, Scalar* tgt_buf) {
1222  const int output_packet_size = internal::unpacket_traits<PacketReturnType>::size;
1223  size_t i = 0;
1224  const size_t num_packets = n / output_packet_size;
1225  for (; i < output_packet_size * num_packets; i += output_packet_size) {
1226  const PacketReturnType src_val = internal::pload<PacketReturnType>(src_buf + i);
1227  const PacketReturnType tgt_val = internal::ploadt<PacketReturnType, Alignment>(tgt_buf + i);
1228  const PacketReturnType sum = internal::padd(src_val, tgt_val);
1229  internal::pstoret<Scalar, PacketReturnType, Alignment>(tgt_buf + i, sum);
1230  }
1231  for (; i < n; ++i) {
1232  tgt_buf[i] += src_buf[i];
1233  }
1234  }
1235 
1236  template <int Alignment>
1237  EIGEN_STRONG_INLINE static void addAllToBuffer(size_t n, const Scalar* src_buf0, const Scalar* src_buf1,
1238  const Scalar* src_buf2, Scalar* dst_buf) {
1239  using ::Eigen::internal::padd;
1240  using ::Eigen::internal::pload;
1241  using ::Eigen::internal::ploadt;
1242  using ::Eigen::internal::pstoret;
1243 
1244  const int output_packet_size = internal::unpacket_traits<PacketReturnType>::size;
1245 
1246  size_t i = 0;
1247  const size_t num_packets = n / output_packet_size;
1248  for (; i < output_packet_size * num_packets; i += output_packet_size) {
1249  const auto src_val0 = pload<PacketReturnType>(src_buf0 + i);
1250  const auto src_val1 = pload<PacketReturnType>(src_buf1 + i);
1251  const auto src_val2 = pload<PacketReturnType>(src_buf2 + i);
1252 
1253  const auto dst_val = ploadt<PacketReturnType, Alignment>(dst_buf + i);
1254  const auto sum = padd(padd(dst_val, src_val0), padd(src_val1, src_val2));
1255 
1256  pstoret<Scalar, PacketReturnType, Alignment>(dst_buf + i, sum);
1257  }
1258  for (; i < n; ++i) {
1259  dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i];
1260  }
1261  }
1262 
1263  template <int Alignment>
1264  void eval(Barrier& barrier, Index start_block_idx, Index end_block_idx) {
1265  while (end_block_idx - start_block_idx > 1) {
1266  Index mid_block_idx = (start_block_idx + end_block_idx) / 2;
1267  evaluator->m_device.enqueueNoNotification([this, &barrier, mid_block_idx, end_block_idx]() {
1268  eval<Alignment>(barrier, mid_block_idx, end_block_idx);
1269  });
1270  end_block_idx = mid_block_idx;
1271  }
1272 
1273  Index block_idx = start_block_idx;
1274  Index block_start = block_idx * block_size;
1275  Index block_end = block_start + actualBlockSize(block_idx);
1276 
1277  processBlock<Alignment>(block_idx, block_start, block_end);
1278  barrier.Notify();
1279  }
1280 
1281  template <int Alignment>
1282  void evalAsync(Index start_block_idx, Index end_block_idx) {
1283  while (end_block_idx - start_block_idx > 1) {
1284  Index mid_block_idx = (start_block_idx + end_block_idx) / 2;
1285  evaluator->m_device.enqueueNoNotification(
1286  [this, mid_block_idx, end_block_idx]() {
1287  evalAsync<Alignment>(mid_block_idx, end_block_idx);
1288  });
1289  end_block_idx = mid_block_idx;
1290  }
1291 
1292  Index block_idx = start_block_idx;
1293 
1294  Index block_start = block_idx * block_size;
1295  Index block_end = block_start + actualBlockSize(block_idx);
1296 
1297  processBlock<Alignment>(block_idx, block_start, block_end);
1298 
1299  int v = num_pending_blocks.fetch_sub(1);
1300  eigen_assert(v >= 1);
1301 
1302  if (v == 1) {
1303  // Aggregate partial sums from l0 ranges.
1304  aggregateL0Blocks<Alignment>();
1305 
1306  // Apply output kernel.
1307  applyOutputKernel();
1308 
1309  // NOTE: If we call the `done` callback before deleting this (the context),
1310  // it might deallocate the Self* pointer captured by the context, and we'll
1311  // fail in the destructor trying to deallocate temporary buffers.
1312 
1313  // Move the done callback out of the context before it is destructed.
1314  DoneCallback done_copy = std::move(done);
1315 
1316  // We are confident that we are the last one to touch the context.
1317  delete this;
1318 
1319  // Now safely call the done callback.
1320  done_copy();
1321  }
1322  }
1323 
1324  // The cost model doesn't capture well the cost associated with constructing
1325  // tensor contraction mappers and computing loop bounds in gemm_pack_lhs
1326  // and gemm_pack_rhs, so we specify a minimum desired block size.
1327  static Index blockSize(Index k, int num_threads) {
1328  const auto round_up = [=](Index index) -> Index {
1329  const Index kmultiple = packet_size <= 8 ? 8 : packet_size;
1330  return numext::div_ceil<Index>(index, kmultiple) * kmultiple;
1331  };
1332 
1333  const Index target_block_size = round_up(numext::div_ceil<Index>(k, num_threads));
1334  const Index desired_min_block_size = 12 * packet_size;
1335 
1336  return numext::mini<Index>(k, numext::maxi<Index>(desired_min_block_size, target_block_size));
1337  }
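  // Worked example (added note, assuming packet_size == 8): for k = 1000 and
  // num_threads = 8, div_ceil(1000, 8) = 125 rounds up to 128, the desired
  // minimum is 12 * 8 = 96, so blockSize returns min(1000, max(96, 128)) = 128.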
1338 
1339  EvalShardedByInnerDimContext(const EvalShardedByInnerDimContext&) = delete;
1340  void operator=(const EvalShardedByInnerDimContext&) = delete;
1341  };
1342 
1343  // ------------------------------------------------------------------------ //
1344 
1345  // Below are the functions used by the evalProductImpl heuristics to select
1346  // optimal parameters for the parallelization algorithm.
1347 
1348  // Decide whether we want to shard m x n contraction by columns or by rows.
1349  static bool shardByCol(Index m, Index n, Index num_threads) {
1350  // Note: we compare both n and m against Traits::nr; this is not
1351  // a mistake. We are trying to figure out how both n and m will fit into
1352  // the main sharding dimension.
1353 
1354  // Sharding by column is the default
1355  // ... unless there is enough data for vectorization over rows
1356  if (m / num_threads >= Traits::nr &&
1357  // and not enough data for vectorization over columns
1358  (n / num_threads < Traits::nr ||
1359  // ... or barely enough data for vectorization over columns,
1360  // but it is not evenly divisible across threads
1361  (n / num_threads < 4 * Traits::nr && (n % (num_threads * Traits::nr)) != 0 &&
1362  // ... and it is evenly divisible across threads for rows
1363  ((m % (num_threads * Traits::nr)) == 0 ||
1364  // ... or it is not evenly divisible in both dimensions, but
1365  // there is much more data over rows so that corner effects are
1366  // mitigated.
1367  (m / n >= 6)))))
1368  return false;
1369  // Also shard by rows if the matrix is strongly elongated over the row
1370  // dimension (m is much larger than n).
1371  if (n / num_threads < 16 * Traits::nr && m > n * 32) return false;
1372  return true;
1373  }
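  // A worked example, assuming Traits::nr == 4 (the real value depends on the
  // gebp kernel): with m = n = 1024 and num_threads = 8 neither early exit
  // triggers, so the contraction is sharded by columns (returns true). With
  // m = 4096, n = 64 and num_threads = 8 the matrix is strongly elongated over
  // rows (n / num_threads = 8 < 16 * nr and m > 32 * n), so the function
  // returns false and the contraction is sharded by rows.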
1374 
1375  Index coarsenM(Index m, Index n, Index bm, Index bn, Index bk, Index gn, int num_threads, bool shard_by_col) const {
1376  Index gm = 1;
1377  Index gm1 = 1;
1378  Index nm0 = numext::div_ceil(m, bm);
1379  Index nm1 = nm0;
1380  for (;;) {
1381  // Find the next candidate for the m grain size. It needs to result in a
1382  // different number of blocks. E.g. if we have 10 kernels, we want to try
1383  // grain sizes 5 and 10, but not 6, 7, 8 and 9.
1384  while (gm1 <= nm0 && nm1 == numext::div_ceil(nm0, gm1)) gm1++;
1385  if (gm1 > nm0) break;
1386  // Check the candidate.
1387  int res = checkGrain(m, n, bm, bn, bk, gm1, gn, gm, gn, num_threads, shard_by_col);
1388  if (res < 0) break;
1389  nm1 = numext::div_ceil(nm0, gm1);
1390  if (res == 0) continue;
1391  // Commit new grain size.
1392  gm = gm1;
1393  }
1394  return gm;
1395  }
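  // A worked example of the candidate search: if nm0 = div_ceil(m, bm) = 10,
  // the loop proposes grain sizes gm1 = 2, 3, 4, 5 and 10 (yielding 5, 4, 3, 2
  // and 1 blocks respectively) and skips 6..9, which would all yield the same
  // two blocks as gm1 = 5. Each candidate is committed only if checkGrain
  // returns 1, and the search stops as soon as a candidate is rejected as too
  // coarse (checkGrain returns a negative value).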
1396 
1397  Index coarsenN(Index m, Index n, Index bm, Index bn, Index bk, Index gm, int num_threads, bool shard_by_col) const {
1398  Index gn = 1;
1399  Index gn1 = 1;
1400  Index nn0 = numext::div_ceil(n, bn);
1401  Index nn1 = nn0;
1402  for (;;) {
1403  while (gn1 <= nn0 && nn1 == numext::div_ceil(nn0, gn1)) gn1++;
1404  if (gn1 > nn0) break;
1405  int res = checkGrain(m, n, bm, bn, bk, gm, gn1, gm, gn, num_threads, shard_by_col);
1406  if (res < 0) break;
1407  nn1 = numext::div_ceil(nn0, gn1);
1408  if (res == 0) continue;
1409  gn = gn1;
1410  }
1411  return gn;
1412  }
1413 
1414  // checkGrain checks whether grain (gm, gn) is suitable and is better than
1415  // (oldgm, oldgn).
1416  int checkGrain(Index m, Index n, Index bm, Index bn, Index bk, Index gm, Index gn, Index oldgm, Index oldgn,
1417  int num_threads, bool shard_by_col) const {
1418  const TensorOpCost cost = contractionCost(bm * gm, bn * gn, bm, bn, bk, shard_by_col, true);
1419  double taskSize = TensorCostModel<ThreadPoolDevice>::taskSize(static_cast<double>(bm) * gm * bn * gn, cost);
1420  // If the task is too small, then we accept it regardless of anything
1421  // else; otherwise synchronization overheads would dominate.
1422  if (taskSize < 1) return 1;
1423  // If it is too large, then we reject it and all larger tasks.
1424  if (taskSize > 2) return -1;
1425  // Now we are presumably in a good task size range.
1426  // The main deciding factor here is parallelism. Consider that we have 12
1427  // kernels and 4 threads. Grains of 2, 3 and 4 all yield good task sizes,
1428  // but grains 2 and 4 yield 6 and 3 tasks, which gives us parallelism of
1429  // 0.75 (at most 3/4 of the cores will be busy), while grain size 3 gives
1430  // us 4 tasks and parallelism of 1 (we can load all cores).
1431  Index nm0 = numext::div_ceil(m, bm);
1432  Index nn0 = numext::div_ceil(n, bn);
1433  Index new_tasks = numext::div_ceil(nm0, gm) * numext::div_ceil(nn0, gn);
1434  double new_parallelism =
1435  static_cast<double>(new_tasks) / (numext::div_ceil<Index>(new_tasks, num_threads) * num_threads);
1436  Index old_tasks = numext::div_ceil(nm0, oldgm) * numext::div_ceil(nn0, oldgn);
1437  double old_parallelism =
1438  static_cast<double>(old_tasks) / (numext::div_ceil<Index>(old_tasks, num_threads) * num_threads);
1439  if (new_parallelism > old_parallelism || new_parallelism == 1) return 1;
1440  return 0;
1441  }
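  // The parallelism metric made concrete for the 12-kernels / 4-threads
  // example above: grain 3 gives new_tasks = 4 and parallelism
  // 4 / (div_ceil(4, 4) * 4) = 1.0, while grain 2 gives 6 tasks and
  // 6 / (div_ceil(6, 4) * 4) = 0.75, and grain 4 gives 3 tasks and
  // 3 / 4 = 0.75; so grain 3 wins the comparison whenever its task size also
  // lands between the "too small" and "too large" thresholds.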
1442 
1443  TensorOpCost contractionCost(Index m, Index n, Index bm, Index bn, Index bk, bool shard_by_col,
1444  bool prepacked) const {
1445  const int packed_size = std::min<int>(PacketType<LhsScalar, Device>::size, PacketType<RhsScalar, Device>::size);
1446  const int output_packet_size = internal::unpacket_traits<PacketReturnType>::size;
1447  const double kd = static_cast<double>(bk);
1448  double compute_bandwidth = computeBandwidth(false, bm, bn, bk);
1449  // Computations.
1450  TensorOpCost cost = TensorOpCost(0, 0, kd * compute_bandwidth, true, packed_size);
1451  // Output stores.
1452  cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size);
1453  if (prepacked) {
1454  // Packing and kernels are executed in different tasks. When we calculate
1455  // the task grain size we look only at the kernel cost, assuming that the
1456  // kernel is more expensive than packing.
1457  return cost;
1458  }
1459  // Lhs/rhs loads + computations.
1460  TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * (kd / n);
1461  TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * (kd / m);
1462  // The packed side's memory cost (lhs when sharding by columns, rhs otherwise)
1463  // contributes little to execution time: it is prefetched early and accessed sequentially.
1464  if (shard_by_col)
1465  lhsCost.dropMemoryCost();
1466  else
1467  rhsCost.dropMemoryCost();
1468  return cost + lhsCost + rhsCost;
1469  }
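  // In the non-prepacked case the returned cost per output coefficient is
  // approximately
  //   bk * compute_bandwidth                (compute)
  // + sizeof(CoeffReturnType)               (output store)
  // + lhs cost per coeff * bk / n           (lhs loads, amortized over columns)
  // + rhs cost per coeff * bk / m           (rhs loads, amortized over rows)
  // with one memory term dropped according to the sharding direction, as in
  // the shard_by_col branch above.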
1470 
1471  // Decide whether we want to shard m x k x n contraction over the inner
1472  // (contraction) dimension (k).
1473  static bool shardByInnerDim(Index m, Index n, Index k, int num_threads, int num_threads_by_k) {
1474  std::ptrdiff_t bufsize = m * n * sizeof(Scalar);
1475  bool shard_by_k = false;
1476  if (n == 1 || // If mat*vec or...
1477  num_threads_by_k < 2 || // running single threaded or...
1478  num_threads_by_k < num_threads || // sharding by k gives less parallelism or...
1479  bufsize > l3CacheSize() / num_threads_by_k || // need more buffer space
1480  // than L3 cache or...
1481  k / num_threads_by_k < 2 * Traits::nr) { // k per thread is tiny.
1482  shard_by_k = false;
1483  } else if (numext::maxi(m, n) / num_threads < Traits::nr || // both other dimensions are tiny or...
1484  // k per thread is not small and...
1485  (k / num_threads_by_k > 8 * Traits::nr &&
1486  // one of the outer dimensions is tiny or sharding by k offers
1487  // more parallelism.
1488  (numext::mini(m, n) < 2 * Traits::nr || num_threads_by_k > num_threads))) {
1489  shard_by_k = true;
1490  }
1491  return shard_by_k;
1492  }
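  // A worked example, assuming Traits::nr == 4, sizeof(Scalar) == 4 and an L3
  // cache large enough for the per-thread share of the m*n accumulation
  // buffer: for m = n = 64, k = 100000, num_threads = 8 and
  // num_threads_by_k = 16, none of the disqualifying conditions hold (bufsize
  // is only 16 KB and k / num_threads_by_k = 6250 >= 2 * nr), the per-thread k
  // is large (6250 > 8 * nr) and sharding by k offers more parallelism
  // (16 > 8), so the contraction is sharded over the inner dimension
  // (returns true).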
1493 
1494  TensorOpCost contractionCostPerInnerDim(Index m, Index n, Index k) const {
1495  // Compute cost.
1496  const int output_packet_size = internal::unpacket_traits<PacketReturnType>::size;
1497  TensorOpCost cost(0, 0, (computeBandwidth(true, m, n, k) * m) * n, true, output_packet_size);
1498  // Output stores.
1499  cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size);
1500  TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * m;
1501  TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * n;
1502  // Since the inner gemm kernel is always sharded by column, the lhs
1503  // load cost is negligible.
1504  lhsCost.dropMemoryCost();
1505  return cost + lhsCost + rhsCost;
1506  }
1507 
1508  int numThreadsInnerDim(Index m, Index n, Index k) const {
1509  const int output_packet_size = internal::unpacket_traits<PacketReturnType>::size;
1510  TensorOpCost cost = contractionCostPerInnerDim(m, n, k);
1511  double total_parallel_cost = TensorCostModel<ThreadPoolDevice>::totalCost(k, cost);
1512  // Cost of reduction step accumulating the m*n per-thread buffers into the
1513  // result.
1514  double reduction_cost =
1515  TensorCostModel<ThreadPoolDevice>::totalCost(m * n, TensorOpCost(2, 1, 1, true, output_packet_size));
1516  int num_threads = 1;
1517  double min_cost = total_parallel_cost;
1518  double kPerThreadOverHead = 3000;
1519  double kFixedOverHead = 100000;
1520  for (int nt = 2; nt <= this->m_device.numThreads(); nt += 2) {
1521  double sequential_cost = kFixedOverHead + nt * (reduction_cost + kPerThreadOverHead);
1522  double parallel_cost = total_parallel_cost / nt + sequential_cost;
1523  if (parallel_cost < min_cost) {
1524  num_threads = nt;
1525  min_cost = parallel_cost;
1526  }
1527  }
1528  return num_threads;
1529  }
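  // The loop above minimizes an estimated cost
  //   parallel_cost(nt) = total_parallel_cost / nt
  //                     + kFixedOverHead + nt * (reduction_cost + kPerThreadOverHead)
  // over even thread counts nt: it trades the speedup from splitting the k
  // dimension against the fixed dispatch overhead and the per-thread cost of
  // reducing the partial m*n buffers into the final result. nt = 1 (i.e. no
  // sharding by k) is kept when no candidate beats total_parallel_cost.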
1530 
1531  double computeBandwidth(bool shard_by_col, Index bm, Index bn, Index bk) const {
1532  // Peak VFMA bandwidth is 0.5. However, if there is not enough data for
1533  // vectorization, the bandwidth drops. The 4.0 and 2.0 bandwidth values were
1534  // determined experimentally.
1535  double computeBandwidth = bk == 1 ? 4.0
1536  : (shard_by_col ? bn : bm) < Traits::nr || (shard_by_col ? bm : bn) < Traits::mr ? 2.0
1537  : 0.5;
1538 #ifndef EIGEN_VECTORIZE_FMA
1539  // The bandwidth of each of VFMA/MULPS/ADDPS is 0.5 on the latest Intel
1540  // processors. However, for MULPS/ADDPS we have a dependent sequence of
1541  // two such instructions,
1542  // so the overall bandwidth is 1.0.
1543  if (computeBandwidth == 0.5) computeBandwidth = 1.0;
1544 #endif
1545  return computeBandwidth;
1546  }
1547 };
1548 
1549 } // end namespace Eigen
1550 
1551 #endif // EIGEN_USE_THREADS
1552 #endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H