#ifndef EIGEN_CXX11_TENSOR_TENSOR_PADDING_H
#define EIGEN_CXX11_TENSOR_TENSOR_PADDING_H

namespace Eigen {
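/** \class TensorPadding
 * \ingroup CXX11_Tensor_Module
 *
 * \brief Tensor padding class.
 * At the moment only padding with a constant value is supported.
 *
 * A minimal usage sketch (illustrative, not part of the original header): the
 * op is normally created through TensorBase::pad(), with one (before, after)
 * pair per dimension and an optional padding value (zero by default).
 *
 * \code
 * Eigen::Tensor<float, 2> input(2, 3);
 * input.setRandom();
 *
 * Eigen::array<std::pair<int, int>, 2> paddings;
 * paddings[0] = std::make_pair(1, 2);  // dim 0: 1 before, 2 after
 * paddings[1] = std::make_pair(0, 1);  // dim 1: 0 before, 1 after
 *
 * Eigen::Tensor<float, 2> padded = input.pad(paddings);          // pads with 0
 * Eigen::Tensor<float, 2> padded2 = input.pad(paddings, -1.0f);  // pads with -1
 * // padded has dimensions (2+1+2) x (3+0+1) = 5 x 4.
 * \endcode
 */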
namespace internal {

template <typename PaddingDimensions, typename XprType>
struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef std::remove_reference_t<Nested> Nested_;
  static constexpr int NumDimensions = XprTraits::NumDimensions;
  static constexpr int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template <typename PaddingDimensions, typename XprType>
struct eval<TensorPaddingOp<PaddingDimensions, XprType>, Eigen::Dense> {
  typedef const TensorPaddingOp<PaddingDimensions, XprType>& type;
};

template <typename PaddingDimensions, typename XprType>
struct nested<TensorPaddingOp<PaddingDimensions, XprType>, 1,
              typename eval<TensorPaddingOp<PaddingDimensions, XprType> >::type> {
  typedef TensorPaddingOp<PaddingDimensions, XprType> type;
};

}  // end namespace internal
template <typename PaddingDimensions, typename XprType>
class TensorPaddingOp : public TensorBase<TensorPaddingOp<PaddingDimensions, XprType>, ReadOnlyAccessors> {
 public:
  typedef typename Eigen::internal::traits<TensorPaddingOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorPaddingOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorPaddingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorPaddingOp>::Index Index;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPaddingOp(const XprType& expr, const PaddingDimensions& padding_dims,
                                                        const Scalar padding_value)
      : m_xpr(expr), m_padding_dims(padding_dims), m_padding_value(padding_value) {}

  EIGEN_DEVICE_FUNC const PaddingDimensions& padding() const { return m_padding_dims; }
  EIGEN_DEVICE_FUNC Scalar padding_value() const { return m_padding_value; }

  EIGEN_DEVICE_FUNC const internal::remove_all_t<typename XprType::Nested>& expression() const { return m_xpr; }

 protected:
  typename XprType::Nested m_xpr;
  const PaddingDimensions m_padding_dims;
  const Scalar m_padding_value;
};
// Eval as rvalue
template <typename PaddingDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device> {
  typedef TensorPaddingOp<PaddingDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static constexpr int NumDims = internal::array_size<PaddingDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static constexpr int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static constexpr int Layout = TensorEvaluator<ArgType, Device>::Layout;
  enum {
    IsAligned = true,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = true,
    CoordAccess = true,
    RawAccess = false
  };

  typedef std::remove_const_t<Scalar> ScalarNoConst;

  //===- Tensor block evaluation ------------------------------------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumDims, Layout, Index> TensorBlock;
  //===----------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device),
        m_padding(op.padding()),
        m_paddingValue(op.padding_value()),
        m_device(device) {
    // The padding op doesn't change the rank of the tensor. Directly padding a
    // scalar would lead to a vector, which doesn't make sense. Instead one
    // should reshape the scalar into a vector of size 1 and then pad.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
    // Compute dimensions.
    m_dimensions = m_impl.dimensions();
    for (int i = 0; i < NumDims; ++i) {
      m_dimensions[i] += m_padding[i].first + m_padding[i].second;
    }
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i - 1] * input_dims[i - 1];
        m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
      }
      m_outputStrides[NumDims] = m_outputStrides[NumDims - 1] * m_dimensions[NumDims - 1];
    } else {
      m_inputStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i + 1] * input_dims[i + 1];
        m_outputStrides[i + 1] = m_outputStrides[i + 2] * m_dimensions[i + 1];
      }
      m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0];
    }
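    // Worked example (illustrative, ColMajor): a 2x3 input padded by (1, 1) in
    // both dimensions becomes a 4x5 output, giving
    //   m_inputStrides  = {1, 2}       (from input_dims   = {2, 3})
    //   m_outputStrides = {1, 4, 20}   (from m_dimensions = {4, 5})
    // The extra entry m_outputStrides[NumDims] is the total output size; the
    // packet paths use it as the right boundary of the outermost dimension.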
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() { m_impl.cleanup(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    eigen_assert(index < dimensions().TotalSize());
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        if (isPaddingAtIndexForDim(idx, i)) {
          return m_paddingValue;
        }
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      if (isPaddingAtIndexForDim(index, 0)) {
        return m_paddingValue;
      }
      inputIndex += (index - m_padding[0].first);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i + 1];
        if (isPaddingAtIndexForDim(idx, i)) {
          return m_paddingValue;
        }
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i + 1];
      }
      if (isPaddingAtIndexForDim(index, NumDims - 1)) {
        return m_paddingValue;
      }
      inputIndex += (index - m_padding[NumDims - 1].first);
    }
    return m_impl.coeff(inputIndex);
  }
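  // Note on coeff() above (illustrative, ColMajor): a 2x3 input padded by
  // (1, 1) in both dimensions gives a 4x5 output with m_outputStrides =
  // {1, 4, 20} and m_inputStrides = {1, 2}. For output index 6: idx = 6 / 4 = 1
  // in dimension 1 (not padding), remainder 2 in dimension 0 (not padding), so
  // coeff(6) reads input index (1 - 1) * 2 + (2 - 1) = 1.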
  template <int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      return packetColMajor(index);
    }
    return packetRowMajor(index);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    TensorOpCost cost = m_impl.costPerCoeff(vectorized);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims; ++i) updateCostPerDimension(cost, i, i == 0);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i >= 0; --i) updateCostPerDimension(cost, i, i == NumDims - 1);
    }
    return cost;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.lastLevelCacheSize();
    return internal::TensorBlockResourceRequirements::merge(
        internal::TensorBlockResourceRequirements::skewed<Scalar>(target_size), m_impl.getResourceRequirements());
  }
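  // Note on getResourceRequirements() above: the requested block shape is
  // skewed towards the inner dimensions and sized to the last-level cache,
  // then merged with whatever the wrapped evaluator itself requires.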
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
                                                          bool /*root_of_expr_ast*/ = false) const {
    // If one of the dimensions is zero, return an empty block view.
    if (desc.size() == 0) {
      return TensorBlock(internal::TensorBlockKind::kView, NULL, desc.dimensions());
    }

    static const bool IsColMajor = Layout == static_cast<int>(ColMajor);
    const int inner_dim_idx = IsColMajor ? 0 : NumDims - 1;

    Index offset = desc.offset();

    // Compute offsets in the output tensor corresponding to the desc.offset().
    DSizes<Index, NumDims> output_offsets;
    for (int i = NumDims - 1; i > 0; --i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      const int stride_dim = IsColMajor ? dim : dim + 1;
      output_offsets[dim] = offset / m_outputStrides[stride_dim];
      offset -= output_offsets[dim] * m_outputStrides[stride_dim];
    }
    output_offsets[inner_dim_idx] = offset;
    // Offsets in the input corresponding to the offsets in the output.
    DSizes<Index, NumDims> input_offsets = output_offsets;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      input_offsets[dim] = input_offsets[dim] - m_padding[dim].first;
    }
    // Compute the offset in the input buffer. At this point it might point
    // outside of the input buffer, because negative offsets are not clamped;
    // this is corrected in the block iteration loop below.
    Index input_offset = 0;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      input_offset += input_offsets[dim] * m_inputStrides[dim];
    }
    // The destination buffer and the scratch buffer are both indexed from 0
    // and have the same dimensions as the requested block.
    Index output_offset = 0;
    const DSizes<Index, NumDims> output_strides = internal::strides<Layout>(desc.dimensions());
    // Initialize block iterator state, skipping the innermost dimension.
    array<BlockIteratorState, NumDims - 1> it;
    for (int i = 0; i < NumDims - 1; ++i) {
      const int dim = IsColMajor ? i + 1 : NumDims - i - 2;
      it[i].count = 0;
      it[i].size = desc.dimension(dim);

      it[i].input_stride = m_inputStrides[dim];
      it[i].input_span = it[i].input_stride * (it[i].size - 1);

      it[i].output_stride = output_strides[dim];
      it[i].output_span = it[i].output_stride * (it[i].size - 1);
    }
    const Index input_inner_dim_size = static_cast<Index>(m_impl.dimensions()[inner_dim_idx]);

    // Total output size.
    const Index output_size = desc.size();

    // We will fill the inner dimension of this size in the output. It might be
    // larger than the inner dimension in the input, so we might have to pad
    // before/after copying values from the input inner dimension.
    const Index output_inner_dim_size = desc.dimension(inner_dim_idx);

    // How many values to fill with padding BEFORE reading from the input inner
    // dimension.
    const Index output_inner_pad_before_size =
        input_offsets[inner_dim_idx] < 0
            ? numext::mini(numext::abs(input_offsets[inner_dim_idx]), output_inner_dim_size)
            : Index(0);

    // How many values we can actually copy from the input inner dimension.
    const Index output_inner_copy_size = numext::mini(
        // Want to copy from the input.
        (output_inner_dim_size - output_inner_pad_before_size),
        // Can copy from the input.
        numext::maxi(input_inner_dim_size - (input_offsets[inner_dim_idx] + output_inner_pad_before_size),
                     Index(0)));

    eigen_assert(output_inner_copy_size >= 0);

    // How many values to fill with padding AFTER reading from the input inner
    // dimension.
    const Index output_inner_pad_after_size =
        (output_inner_dim_size - output_inner_copy_size - output_inner_pad_before_size);

    // Sanity check: the sum of all sizes must be equal to the output size.
    eigen_assert(output_inner_dim_size ==
                 (output_inner_pad_before_size + output_inner_copy_size + output_inner_pad_after_size));
    // Keep track of the current coordinates and padding in the output.
    DSizes<Index, NumDims> output_coord = output_offsets;
    DSizes<Index, NumDims> output_padded;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
    }

    typedef internal::StridedLinearBufferCopy<ScalarNoConst, Index> LinCopy;

    // Prepare storage for the materialized padding result.
    const typename TensorBlock::Storage block_storage = TensorBlock::prepareStorage(desc, scratch);
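    // LinCopy semantics, as understood from TensorBlock.h (hedged summary):
    // Kind::Linear copies a contiguous range from src to dst, while
    // Kind::FillLinear broadcasts the single value at src across the dst
    // range. Dst and Src are (offset, stride, data) triples.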
    // When possible, squeeze writes for the innermost (only if non-padded)
    // dimension together with the first padded dimension. This reduces the
    // number of calls to LinCopy and makes better use of vector instructions.
    const bool squeeze_writes = NumDims > 1 &&
                                // inner dimension is not padded
                                (input_inner_dim_size == m_dimensions[inner_dim_idx]) &&
                                // and is equal to the block inner dimension
                                (input_inner_dim_size == output_inner_dim_size);

    const int squeeze_dim = IsColMajor ? inner_dim_idx + 1 : inner_dim_idx - 1;

    // Maximum coordinate on the squeeze dimension that we can write to.
    const Index squeeze_max_coord =
        squeeze_writes ? numext::mini(
                             // max non-padded element in the squeeze dim
                             static_cast<Index>(m_dimensions[squeeze_dim] - m_padding[squeeze_dim].second),
                             // max element in the output buffer squeeze dim
                             static_cast<Index>(output_offsets[squeeze_dim] + desc.dimension(squeeze_dim)))
                       : static_cast<Index>(0);
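    // Example (illustrative): a ColMajor 4x3x3 input padded by (1, 1) only in
    // dimension 1 yields a 4x5x3 output. The inner dimension (0) is unpadded
    // and matches the block inner dimension, so a run of non-padded dim-1
    // coordinates can be written with a single LinCopy instead of one per row.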
    // Iterate copying data from `m_impl.data()` to the output buffer.
    for (Index size = 0; size < output_size;) {
      // Detect if we are in the padded region (exclude the innermost dimension).
      bool is_padded = false;
      for (int j = 1; j < NumDims; ++j) {
        const int dim = IsColMajor ? j : NumDims - j - 1;
        is_padded = output_padded[dim];
        if (is_padded) break;
      }

      if (is_padded) {
        // Fill the padded region with the padding value.
        size += output_inner_dim_size;

        LinCopy::template Run<LinCopy::Kind::FillLinear>(
            typename LinCopy::Dst(output_offset, 1, block_storage.data()),
            typename LinCopy::Src(0, 0, &m_paddingValue), output_inner_dim_size);
      } else if (squeeze_writes) {
        // Squeeze multiple reads from innermost dimensions.
        const Index squeeze_num = squeeze_max_coord - output_coord[squeeze_dim];
        size += output_inner_dim_size * squeeze_num;

        // Copy `squeeze_num` inner dimensions from the input to the output.
        LinCopy::template Run<LinCopy::Kind::Linear>(
            typename LinCopy::Dst(output_offset, 1, block_storage.data()),
            typename LinCopy::Src(input_offset, 1, m_impl.data()),
            output_inner_dim_size * squeeze_num);

        // Update the iteration state for only `squeeze_num - 1` processed
        // inner dimensions; the loop at the end of this iteration updates the
        // state for the last one.
        it[0].count += (squeeze_num - 1);
        input_offset += it[0].input_stride * (squeeze_num - 1);
        output_offset += it[0].output_stride * (squeeze_num - 1);
        output_coord[squeeze_dim] += (squeeze_num - 1);
      } else {
        size += output_inner_dim_size;

        {  // Fill with padding before copying from the input inner dimension.
          const Index out = output_offset;

          LinCopy::template Run<LinCopy::Kind::FillLinear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(0, 0, &m_paddingValue), output_inner_pad_before_size);
        }
        {  // Copy data from the input inner dimension.
          const Index out = output_offset + output_inner_pad_before_size;
          const Index in = input_offset + output_inner_pad_before_size;

          eigen_assert(output_inner_copy_size == 0 || m_impl.data() != NULL);

          LinCopy::template Run<LinCopy::Kind::Linear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(in, 1, m_impl.data()), output_inner_copy_size);
        }
        {  // Fill with padding after copying from the input inner dimension.
          const Index out = output_offset + output_inner_pad_before_size + output_inner_copy_size;

          LinCopy::template Run<LinCopy::Kind::FillLinear>(
              typename LinCopy::Dst(out, 1, block_storage.data()),
              typename LinCopy::Src(0, 0, &m_paddingValue), output_inner_pad_after_size);
        }
      }
      // Update offsets and coordinates for the next inner dimension.
      for (int j = 0; j < NumDims - 1; ++j) {
        const int dim = IsColMajor ? j + 1 : NumDims - j - 2;

        if (++it[j].count < it[j].size) {
          input_offset += it[j].input_stride;
          output_offset += it[j].output_stride;
          output_coord[dim] += 1;
          output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
          break;
        } else {
          it[j].count = 0;
          input_offset -= it[j].input_span;
          output_offset -= it[j].output_span;
          output_coord[dim] -= it[j].size - 1;
          output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim);
        }
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; }
 private:
  struct BlockIteratorState {
    BlockIteratorState() : count(0), size(0), input_stride(0), input_span(0), output_stride(0), output_span(0) {}

    Index count;
    Index size;
    Index input_stride;
    Index input_span;
    Index output_stride;
    Index output_span;
  };
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isPaddingAtIndexForDim(Index index, int dim_index) const {
    return (!internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0) &&
            index < m_padding[dim_index].first) ||
           (!internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0) &&
            index >= m_dimensions[dim_index] - m_padding[dim_index].second);
  }
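  // Note on isPaddingAtIndexForDim() above (illustrative): for a dimension
  // padded from input size 4 to output size 7 with m_padding[d] = {2, 1},
  // output indices 0..1 and 6 are padding, and indices 2..5 map to input
  // indices 0..3. The statically_eq checks let the compiler drop a comparison
  // when a pad amount is known to be zero at compile time.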
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isLeftPaddingCompileTimeZero(int dim_index) const {
    return internal::index_pair_first_statically_eq<PaddingDimensions>(dim_index, 0);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isRightPaddingCompileTimeZero(int dim_index) const {
    return internal::index_pair_second_statically_eq<PaddingDimensions>(dim_index, 0);
  }
  void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const {
    const double in = static_cast<double>(m_impl.dimensions()[i]);
    const double out = in + m_padding[i].first + m_padding[i].second;
    if (out == 0) return;
    const double reduction = in / out;
    cost *= reduction;
    if (first) {
      cost +=
          TensorOpCost(0, 0, 2 * TensorOpCost::AddCost<Index>() + reduction * (1 * TensorOpCost::AddCost<Index>()));
    } else {
      cost += TensorOpCost(0, 0,
                           2 * TensorOpCost::AddCost<Index>() + 2 * TensorOpCost::MulCost<Index>() +
                               reduction * (2 * TensorOpCost::MulCost<Index>() + 1 * TensorOpCost::DivCost<Index>()));
    }
  }
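  // In updateCostPerDimension() above, `reduction` is the fraction of output
  // coefficients that map back into the input: only those pay the wrapped
  // evaluator's per-coefficient cost (hence cost *= reduction), while every
  // output coefficient pays the index arithmetic accounted for afterwards.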
 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    const Index initialIndex = index;
    Index inputIndex = 0;
    EIGEN_UNROLL_LOOP
    for (int i = NumDims - 1; i > 0; --i) {
      const Index firstIdx = index;
      const Index lastIdx = index + PacketSize - 1;
      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i];
      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i];
      const Index lastPaddedRight = m_outputStrides[i + 1];

      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      } else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      } else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) ||
                 (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
        // all the coefficients are between the two padding zones.
        const Index idx = index / m_outputStrides[i];
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      } else {
        // Every other case.
        return packetWithPossibleZero(initialIndex);
      }
    }
    const Index lastIdx = index + PacketSize - 1;
    const Index firstIdx = index;
    const Index lastPaddedLeft = m_padding[0].first;
    const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second);
    const Index lastPaddedRight = m_outputStrides[1];

    if (!isLeftPaddingCompileTimeZero(0) && lastIdx < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    } else if (!isRightPaddingCompileTimeZero(0) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    } else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) ||
               (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
      // all the coefficients are between the two padding zones.
      inputIndex += (index - m_padding[0].first);
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Every other case.
    return packetWithPossibleZero(initialIndex);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    const Index initialIndex = index;
    Index inputIndex = 0;
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < NumDims - 1; ++i) {
      const Index firstIdx = index;
      const Index lastIdx = index + PacketSize - 1;
      const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i + 1];
      const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i + 1];
      const Index lastPaddedRight = m_outputStrides[i];

      if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      } else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) {
        // all the coefficients are in the padding zone.
        return internal::pset1<PacketReturnType>(m_paddingValue);
      } else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) ||
                 (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
        // all the coefficients are between the two padding zones.
        const Index idx = index / m_outputStrides[i + 1];
        inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
        index -= idx * m_outputStrides[i + 1];
      } else {
        // Every other case.
        return packetWithPossibleZero(initialIndex);
      }
    }
    const Index lastIdx = index + PacketSize - 1;
    const Index firstIdx = index;
    const Index lastPaddedLeft = m_padding[NumDims - 1].first;
    const Index firstPaddedRight = (m_dimensions[NumDims - 1] - m_padding[NumDims - 1].second);
    const Index lastPaddedRight = m_outputStrides[NumDims - 1];

    if (!isLeftPaddingCompileTimeZero(NumDims - 1) && lastIdx < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    } else if (!isRightPaddingCompileTimeZero(NumDims - 1) && firstIdx >= firstPaddedRight &&
               lastIdx < lastPaddedRight) {
      // all the coefficients are in the padding zone.
      return internal::pset1<PacketReturnType>(m_paddingValue);
    } else if ((isLeftPaddingCompileTimeZero(NumDims - 1) && isRightPaddingCompileTimeZero(NumDims - 1)) ||
               (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) {
      // all the coefficients are between the two padding zones.
      inputIndex += (index - m_padding[NumDims - 1].first);
      return m_impl.template packet<Unaligned>(inputIndex);
    }
    // Every other case.
    return packetWithPossibleZero(initialIndex);
  }
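  // packetColMajor/packetRowMajor above serve a whole packet only when it lies
  // entirely inside one region (all padding, or all interior). Packets that
  // straddle a padding boundary fall back to packetWithPossibleZero below,
  // which assembles the packet coefficient by coefficient.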
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const {
    EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  Dimensions m_dimensions;
  array<Index, NumDims + 1> m_outputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  PaddingDimensions m_padding;

  Scalar m_paddingValue;

  const Device EIGEN_DEVICE_REF m_device;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_PADDING_H