TensorPadding.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
#define EIGEN_CXX11_TENSOR_TENSOR_PADDING_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \class TensorPadding
 * \ingroup CXX11_Tensor_Module
 *
 * \brief Tensor padding class.
 * At the moment only padding with a constant value is supported.
 *
 */
namespace internal {
template <typename PaddingDimensions, typename XprType>
struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};

template <typename PaddingDimensions, typename XprType>
struct eval<TensorPaddingOp<PaddingDimensions, XprType>, Eigen::Dense> {
  typedef const TensorPaddingOp<PaddingDimensions, XprType>& type;
};

template <typename PaddingDimensions, typename XprType>
struct nested<TensorPaddingOp<PaddingDimensions, XprType>, 1,
              typename eval<TensorPaddingOp<PaddingDimensions, XprType> >::type> {
  typedef TensorPaddingOp<PaddingDimensions, XprType> type;
};

}  // end namespace internal

template <typename PaddingDimensions, typename XprType>
class TensorPaddingOp : public TensorBase<TensorPaddingOp<PaddingDimensions, XprType>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorPaddingOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorPaddingOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorPaddingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorPaddingOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPaddingOp(const XprType& expr, const PaddingDimensions& padding_dims,
                                                        const Scalar padding_value)
      : m_xpr(expr), m_padding_dims(padding_dims), m_padding_value(padding_value) {}

  EIGEN_DEVICE_FUNC const PaddingDimensions& padding() const { return m_padding_dims; }
  EIGEN_DEVICE_FUNC Scalar padding_value() const { return m_padding_value; }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>& expression() const { return m_xpr; }

 protected:
  typename XprType::Nested m_xpr;
  const PaddingDimensions m_padding_dims;
  const Scalar m_padding_value;
};

// Eval as rvalue
template <typename PaddingDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device> {
  typedef TensorPaddingOp<PaddingDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<PaddingDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = true,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    CoordAccess = true,
    RawAccess = false
  };

  typedef std::remove_const_t<Scalar> ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef internal::TensorMaterializedBlock<ScalarNoConst, NumDims, Layout, Index> TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value()), m_device(device) {
    // The padding op doesn't change the rank of the tensor. Directly padding a scalar would lead
    // to a vector, which doesn't make sense. Instead one should reshape the scalar into a vector
    // of 1 element first and then pad.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);

    // Compute dimensions
    m_dimensions = m_impl.dimensions();
    for (int i = 0; i < NumDims; ++i) {
      m_dimensions[i] += m_padding[i].first + m_padding[i].second;
    }
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i - 1] * input_dims[i - 1];
        m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
      }
      m_outputStrides[NumDims] = m_outputStrides[NumDims - 1] * m_dimensions[NumDims - 1];
    } else {
      m_inputStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i + 1] * input_dims[i + 1];
        m_outputStrides[i + 1] = m_outputStrides[i + 2] * m_dimensions[i + 1];
      }
      m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0];
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    eigen_assert(index < dimensions().TotalSize());
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        if (isPaddingAtIndexForDim(idx, i)) {
          return m_paddingValue;
        }
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      if (isPaddingAtIndexForDim(index, 0)) {
        return m_paddingValue;
      }
      inputIndex += (index - m_padding[0].first);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i + 1];
        if (isPaddingAtIndexForDim(idx, i)) {
          return m_paddingValue;
        }
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i + 1];
      }
      if (isPaddingAtIndexForDim(index, NumDims - 1)) {
        return m_paddingValue;
      }
      inputIndex += (index - m_padding[NumDims - 1].first);
    }
    return m_impl.coeff(inputIndex);
  }

  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      return packetColMajor(index);
    }
    return packetRowMajor(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    TensorOpCost cost = m_impl.costPerCoeff(vectorized);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims; ++i) updateCostPerDimension(cost, i, i == 0);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i >= 0; --i) updateCostPerDimension(cost, i, i == NumDims - 1);
    }
    return cost;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.lastLevelCacheSize();
    return internal::TensorBlockResourceRequirements::merge(
        internal::TensorBlockResourceRequirements::skewed<Scalar>(target_size), m_impl.getResourceRequirements());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
                                                          bool /*root_of_expr_ast*/ = false) const {
    // If one of the dimensions is zero, return an empty block view.
    if (desc.size() == 0) {
      return TensorBlock(internal::TensorBlockKind::kView, NULL, desc.dimensions());
    }

    static const bool IsColMajor = Layout == static_cast<int>(ColMajor);
    const int inner_dim_idx = IsColMajor ? 0 : NumDims - 1;

    Index offset = desc.offset();

    // Compute offsets in the output tensor corresponding to the desc.offset().
    DSizes<Index, NumDims> output_offsets;
    for (int i = NumDims - 1; i > 0; --i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      const int stride_dim = IsColMajor ? dim : dim + 1;
      output_offsets[dim] = offset / m_outputStrides[stride_dim];
      offset -= output_offsets[dim] * m_outputStrides[stride_dim];
    }
    output_offsets[inner_dim_idx] = offset;

    // Offsets in the input corresponding to output offsets.
    DSizes<Index, NumDims> input_offsets = output_offsets;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      input_offsets[dim] = input_offsets[dim] - m_padding[dim].first;
    }

    // Compute the offset in the input buffer. At this point it might be illegal and point
    // outside of the input buffer, because we don't check for negative offsets; it will be
    // corrected in the block iteration loop below.
    Index input_offset = 0;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      input_offset += input_offsets[dim] * m_inputStrides[dim];
    }

    // Destination buffer and scratch buffer are both indexed from 0 and have the
    // same dimensions as the requested block (for the destination buffer this
    // property is guaranteed by `desc.destination()`).
    Index output_offset = 0;
    const DSizes<Index, NumDims> output_strides = internal::strides<Layout>(desc.dimensions());

    // NOTE(ezhulenev): We initialize block iteration state for `NumDims - 1`
    // dimensions, skipping the innermost dimension. In theory it should be possible
    // to squeeze matching innermost dimensions, however in practice that did
    // not show any improvement in benchmarks. Also, in practice the first outer
    // dimension usually has padding, which will prevent squeezing.

    // Initialize the output block iterator state. Dimensions in this array are
    // always in inner_most -> outer_most order (col major layout).
    array<BlockIteratorState, NumDims - 1> it;
    for (int i = 0; i < NumDims - 1; ++i) {
      const int dim = IsColMajor ? i + 1 : NumDims - i - 2;
      it[i].count = 0;
      it[i].size = desc.dimension(dim);

      it[i].input_stride = m_inputStrides[dim];
      it[i].input_span = it[i].input_stride * (it[i].size - 1);

      it[i].output_stride = output_strides[dim];
      it[i].output_span = it[i].output_stride * (it[i].size - 1);
    }

    const Index input_inner_dim_size = static_cast<Index>(m_impl.dimensions()[inner_dim_idx]);

    // Total output size.
    const Index output_size = desc.size();

    // We will fill the inner dimension of this size in the output. It might be
    // larger than the inner dimension in the input, so we might have to pad
    // before/after we copy values from the input inner dimension.
    const Index output_inner_dim_size = desc.dimension(inner_dim_idx);

    // How many values to fill with padding BEFORE reading from the input inner
    // dimension.
    const Index output_inner_pad_before_size =
        input_offsets[inner_dim_idx] < 0
            ? numext::mini(numext::abs(input_offsets[inner_dim_idx]), output_inner_dim_size)
            : 0;

    // How many values we can actually copy from the input inner dimension.
    const Index output_inner_copy_size = numext::mini(
        // Want to copy from input.
        (output_inner_dim_size - output_inner_pad_before_size),
        // Can copy from input.
        numext::maxi(input_inner_dim_size - (input_offsets[inner_dim_idx] + output_inner_pad_before_size), Index(0)));

    eigen_assert(output_inner_copy_size >= 0);

    // How many values to fill with padding AFTER reading from the input inner
    // dimension.
    const Index output_inner_pad_after_size =
        (output_inner_dim_size - output_inner_copy_size - output_inner_pad_before_size);

    // Sanity check: the sum of all sizes must be equal to the output size.
    eigen_assert(output_inner_dim_size ==
                 (output_inner_pad_before_size + output_inner_copy_size + output_inner_pad_after_size));

    // Keep track of current coordinates and padding in the output.
    DSizes<Index, NumDims> output_coord = output_offsets;
    DSizes<Index, NumDims> output_padded;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
    }

    typedef internal::StridedLinearBufferCopy<ScalarNoConst, Index> LinCopy;

    // Prepare storage for the materialized padding result.
    const typename TensorBlock::Storage block_storage = TensorBlock::prepareStorage(desc, scratch);

    // TODO(ezhulenev): Squeeze multiple non-padded inner dimensions into a
    // single logical inner dimension.

    // When possible we squeeze writes for the innermost (only if non-padded)
    // dimension with the first padded dimension. This allows us to reduce the
    // number of calls to LinCopy and to better utilize vector instructions.
    const bool squeeze_writes = NumDims > 1 &&
                                // inner dimension is not padded
                                (input_inner_dim_size == m_dimensions[inner_dim_idx]) &&
                                // and equal to the block inner dimension
                                (input_inner_dim_size == output_inner_dim_size);

    const int squeeze_dim = IsColMajor ? inner_dim_idx + 1 : inner_dim_idx - 1;

    // Maximum coordinate on a squeeze dimension that we can write to.
    const Index squeeze_max_coord =
        squeeze_writes ? numext::mini(
                             // max non-padded element in the input
                             static_cast<Index>(m_dimensions[squeeze_dim] - m_padding[squeeze_dim].second),
                             // max element in the output buffer
                             static_cast<Index>(output_offsets[squeeze_dim] + desc.dimension(squeeze_dim)))
                       : static_cast<Index>(0);

    // Iterate copying data from `m_impl.data()` to the output buffer.
    for (Index size = 0; size < output_size;) {
      // Detect if we are in the padded region (exclude innermost dimension).
      bool is_padded = false;
      for (int j = 1; j < NumDims; ++j) {
        const int dim = IsColMajor ? j : NumDims - j - 1;
        is_padded = output_padded[dim];
        if (is_padded) break;
      }

      if (is_padded) {
        // Fill single innermost dimension with padding value.
        size += output_inner_dim_size;

        LinCopy::template Run<LinCopy::Kind::FillLinear>(typename LinCopy::Dst(output_offset, 1, block_storage.data()),
                                                         typename LinCopy::Src(0, 0, &m_paddingValue),
                                                         output_inner_dim_size);

      } else if (squeeze_writes) {
        // Squeeze multiple reads from innermost dimensions.
        const Index squeeze_num = squeeze_max_coord - output_coord[squeeze_dim];
        size += output_inner_dim_size * squeeze_num;

        // Copy `squeeze_num` inner dimensions from input to output.
        LinCopy::template Run<LinCopy::Kind::Linear>(typename LinCopy::Dst(output_offset, 1, block_storage.data()),
                                                     typename LinCopy::Src(input_offset, 1, m_impl.data()),
                                                     output_inner_dim_size * squeeze_num);

        // Update iteration state for only `squeeze_num - 1` processed inner
        // dimensions, because we have another iteration state update at the end
        // of the loop that will update iteration state for the last inner
        // processed dimension.
        it[0].count += (squeeze_num - 1);
        input_offset += it[0].input_stride * (squeeze_num - 1);
        output_offset += it[0].output_stride * (squeeze_num - 1);
        output_coord[squeeze_dim] += (squeeze_num - 1);

      } else {
        // Single read from innermost dimension.
        size += output_inner_dim_size;

        {  // Fill with padding before copying from input inner dimension.
          const Index out = output_offset;

          LinCopy::template Run<LinCopy::Kind::FillLinear>(typename LinCopy::Dst(out, 1, block_storage.data()),
                                                           typename LinCopy::Src(0, 0, &m_paddingValue),
                                                           output_inner_pad_before_size);
        }

        {  // Copy data from input inner dimension.
          const Index out = output_offset + output_inner_pad_before_size;
          const Index in = input_offset + output_inner_pad_before_size;

          eigen_assert(output_inner_copy_size == 0 || m_impl.data() != NULL);

          LinCopy::template Run<LinCopy::Kind::Linear>(typename LinCopy::Dst(out, 1, block_storage.data()),
                                                       typename LinCopy::Src(in, 1, m_impl.data()),
                                                       output_inner_copy_size);
        }

        {  // Fill with padding after copying from input inner dimension.
          const Index out = output_offset + output_inner_pad_before_size + output_inner_copy_size;

          LinCopy::template Run<LinCopy::Kind::FillLinear>(typename LinCopy::Dst(out, 1, block_storage.data()),
                                                           typename LinCopy::Src(0, 0, &m_paddingValue),
                                                           output_inner_pad_after_size);
        }
      }

      for (int j = 0; j < NumDims - 1; ++j) {
        const int dim = IsColMajor ? j + 1 : NumDims - j - 2;

        if (++it[j].count < it[j].size) {
          input_offset += it[j].input_stride;
          output_offset += it[j].output_stride;
          output_coord[dim] += 1;
          output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
          break;
        }
        it[j].count = 0;
        input_offset -= it[j].input_span;
        output_offset -= it[j].output_span;
        output_coord[dim] -= it[j].size - 1;
        output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; }

 private:
  struct BlockIteratorState {
    BlockIteratorState() : count(0), size(0), input_stride(0), input_span(0), output_stride(0), output_span(0) {}

    Index count;
    Index size;
    Index input_stride;
    Index input_span;
    Index output_stride;
    Index output_span;
  };

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isPaddingAtIndexForDim(Index index, int dim_index) const {
    return (!internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0) &&
            index < m_padding[dim_index].first) ||
           (!internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0) &&
            index >= m_dimensions[dim_index] - m_padding[dim_index].second);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isLeftPaddingCompileTimeZero(int dim_index) const {
    return internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isRightPaddingCompileTimeZero(int dim_index) const {
    return internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0);
  }

  void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const {
    const double in = static_cast<double>(m_impl.dimensions()[i]);
    const double out = in + m_padding[i].first + m_padding[i].second;
    if (out == 0) return;
    const double reduction = in / out;
    cost *= reduction;
    if (first) {
      cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() + reduction * (1 * TensorOpCost::AddCost<Index>()));
    } else {
      cost += TensorOpCost(0, 0,
                           2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
                               reduction * (2 * TensorOpCost::MulCost<Index>() + 1 * TensorOpCost::DivCost<Index>()));
    }
  }

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const {
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    const Index initialIndex = index;
    Index inputIndex = 0;
    EIGEN_UNROLL_LOOP
    for (int i = NumDims - 1; i > 0; --i) {
      const Index firstIdx = index;
      const Index lastIdx = index + PacketSize - 1;
      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i];
      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i];
      const Index lastPaddedRight = m_outputStrides[i + 1];

      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      } else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      } else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) ||
                 (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
        // all the coefficients are between the 2 padding zones.
        const Index idx = index / m_outputStrides[i];
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      } else {
        // Every other case
        return packetWithPossibleZero(initialIndex);
      }
    }

    const Index lastIdx = index + PacketSize - 1;
    const Index firstIdx = index;
    const Index lastPaddedLeft = m_padding[0].first;
    const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second);
    const Index lastPaddedRight = m_outputStrides[1];

    if (!isLeftPaddingCompileTimeZero(0) && lastIdx < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    } else if (!isRightPaddingCompileTimeZero(0) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    } else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) ||
               (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
      // all the coefficients are between the 2 padding zones.
      inputIndex += (index - m_padding[0].first);
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Every other case
    return packetWithPossibleZero(initialIndex);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const {
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    const Index initialIndex = index;
    Index inputIndex = 0;
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < NumDims - 1; ++i) {
      const Index firstIdx = index;
      const Index lastIdx = index + PacketSize - 1;
      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i + 1];
      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i + 1];
      const Index lastPaddedRight = m_outputStrides[i];

      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      } else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      } else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) ||
                 (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
        // all the coefficients are between the 2 padding zones.
        const Index idx = index / m_outputStrides[i + 1];
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i + 1];
      } else {
        // Every other case
        return packetWithPossibleZero(initialIndex);
      }
    }

    const Index lastIdx = index + PacketSize - 1;
    const Index firstIdx = index;
    const Index lastPaddedLeft = m_padding[NumDims - 1].first;
    const Index firstPaddedRight = (m_dimensions[NumDims - 1] - m_padding[NumDims - 1].second);
    const Index lastPaddedRight = m_outputStrides[NumDims - 1];

    if (!isLeftPaddingCompileTimeZero(NumDims - 1) && lastIdx < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    } else if (!isRightPaddingCompileTimeZero(NumDims - 1) && firstIdx >= firstPaddedRight &&
               lastIdx < lastPaddedRight) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    } else if ((isLeftPaddingCompileTimeZero(NumDims - 1) && isRightPaddingCompileTimeZero(NumDims - 1)) ||
               (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
      // all the coefficients are between the 2 padding zones.
      inputIndex += (index - m_padding[NumDims - 1].first);
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Every other case
    return packetWithPossibleZero(initialIndex);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const {
    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  Dimensions m_dimensions;
  array<Index, NumDims + 1> m_outputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  PaddingDimensions m_padding;

  Scalar m_paddingValue;

  const Device EIGEN_DEVICE_REF m_device;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
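
For reference, a minimal usage sketch of this op (not part of the header above): TensorPaddingOp is normally created through TensorBase::pad(), which takes one (pad-before, pad-after) pair per dimension and an optional padding value that defaults to zero. The tensor rank, dimension sizes, and the -1.0f fill value below are illustrative only.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>
#include <utility>

int main() {
  Eigen::Tensor<float, 2> input(3, 4);
  input.setConstant(1.0f);

  // One (pad-before, pad-after) pair per dimension.
  Eigen::array<std::pair<int, int>, 2> paddings;
  paddings[0] = std::make_pair(1, 2);  // dim 0: 3 -> 1 + 3 + 2 = 6
  paddings[1] = std::make_pair(0, 1);  // dim 1: 4 -> 0 + 4 + 1 = 5

  // pad() builds a TensorPaddingOp; the default padding value is 0,
  // and an explicit value can be passed as a second argument.
  Eigen::Tensor<float, 2> zero_padded = input.pad(paddings);
  Eigen::Tensor<float, 2> value_padded = input.pad(paddings, -1.0f);

  std::cout << zero_padded.dimension(0) << "x" << zero_padded.dimension(1) << "\n";  // prints 6x5
  return 0;
}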