Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType > Class Template Reference

#include <TensorBlock.h>

Classes

class  Storage
 

Public Types

typedef DSizes< IndexType, NumDims > Dimensions
 
typedef TensorMap< const Tensor< Scalar, NumDims, Layout > > XprType
 
typedef internal::TensorBlockDescriptor< NumDims, IndexType > TensorBlockDesc
 

Public Member Functions

 TensorMaterializedBlock (TensorBlockKind kind, const Scalar *data, const Dimensions &dimensions, bool valid_expr=true)
 
TensorBlockKind kind () const
 
const XprType & expr () const
 
const Scalar * data () const
 
void cleanup ()
 

Static Public Member Functions

template<typename TensorBlockScratch >
static EIGEN_STRONG_INLINE Storage prepareStorage (TensorBlockDesc &desc, TensorBlockScratch &scratch, bool allow_strided_storage=false)
 
template<typename DataDimensions , typename TensorBlockScratch >
static EIGEN_STRONG_INLINE TensorMaterializedBlock materialize (const Scalar *data, const DataDimensions &data_dims, TensorBlockDesc &desc, TensorBlockScratch &scratch)
 

Private Attributes

TensorBlockKind m_kind
 
const Scalar * m_data
 
Dimensions m_dimensions
 
XprType m_expr
 
bool m_valid_expr
 

Member Typedef Documentation

◆ Dimensions

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
typedef DSizes<IndexType, NumDims> Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::Dimensions

◆ TensorBlockDesc

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
typedef internal::TensorBlockDescriptor<NumDims, IndexType> Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::TensorBlockDesc

◆ XprType

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
typedef TensorMap<const Tensor<Scalar, NumDims, Layout> > Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::XprType

Constructor & Destructor Documentation

◆ TensorMaterializedBlock()

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::TensorMaterializedBlock ( TensorBlockKind  kind,
const Scalar *  data,
const Dimensions &  dimensions,
bool  valid_expr = true 
)
inline
611  : m_kind(kind), m_data(data), m_dimensions(dimensions), m_expr(m_data, m_dimensions), m_valid_expr(valid_expr) {
615  }
#define eigen_assert(x)
Definition: Macros.h:910
const Scalar * data() const
Definition: TensorBlock.h:625
TensorBlockKind kind() const
Definition: TensorBlock.h:617
bool m_valid_expr
Definition: TensorBlock.h:759
XprType m_expr
Definition: TensorBlock.h:758
TensorBlockKind m_kind
Definition: TensorBlock.h:755
const Scalar * m_data
Definition: TensorBlock.h:756
Dimensions m_dimensions
Definition: TensorBlock.h:757
@ kMaterializedInOutput
Definition: TensorBlock.h:559
@ kMaterializedInScratch
Definition: TensorBlock.h:550
@ kView
Definition: TensorBlock.h:545

References eigen_assert, Eigen::internal::kMaterializedInOutput, Eigen::internal::kMaterializedInScratch, Eigen::internal::kView, and Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::m_kind.

Referenced by Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::Storage::AsTensorMaterializedBlock(), and Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::materialize().

Member Function Documentation

◆ cleanup()

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
void Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::cleanup ( )
inline

◆ data()

◆ expr()

◆ kind()

◆ materialize()

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
template<typename DataDimensions , typename TensorBlockScratch >
static EIGEN_STRONG_INLINE TensorMaterializedBlock Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::materialize ( const Scalar *  data,
const DataDimensions &  data_dims,
TensorBlockDesc &  desc,
TensorBlockScratch &  scratch 
)
inlinestatic
700  {
701  eigen_assert(array_size<DataDimensions>::value == desc.dimensions().size());
702 
703  // If a tensor block dimensions covers a contiguous block of the underlying
704  // memory, we can skip block buffer memory allocation, and construct a block
705  // from existing `data` memory buffer.
706  //
707  // Example: (RowMajor layout)
708  // data_dims: [11, 12, 13, 14]
709  // desc.dimensions(): [1, 1, 3, 14]
710  //
711  // In this case we can construct a TensorBlock starting at
712  // `data + desc.offset()`, with a `desc.dimensions()` block sizes.
713  static const bool is_col_major = Layout == ColMajor;
714 
715  // Find out how many inner dimensions have a matching size.
716  int num_matching_inner_dims = 0;
717  for (int i = 0; i < NumDims; ++i) {
718  int dim = is_col_major ? i : NumDims - i - 1;
719  if (data_dims[dim] != desc.dimensions()[dim]) break;
720  ++num_matching_inner_dims;
721  }
722 
723  // All the outer dimensions must be of size `1`, except a single dimension
724  // before the matching inner dimension (`3` in the example above).
725  bool can_use_direct_access = true;
726  for (int i = num_matching_inner_dims + 1; i < NumDims; ++i) {
727  int dim = is_col_major ? i : NumDims - i - 1;
728  if (desc.dimension(dim) != 1) {
729  can_use_direct_access = false;
730  break;
731  }
732  }
733 
734  if (can_use_direct_access) {
735  const Scalar* block_start = data + desc.offset();
736  return TensorMaterializedBlock(internal::TensorBlockKind::kView, block_start, desc.dimensions());
737 
738  } else {
739  // Reuse destination buffer or allocate new buffer with scratch allocator.
740  const Storage storage = prepareStorage(desc, scratch);
741 
742  typedef internal::TensorBlockIO<Scalar, IndexType, NumDims, Layout> TensorBlockIO;
743  typedef typename TensorBlockIO::Dst TensorBlockIODst;
744  typedef typename TensorBlockIO::Src TensorBlockIOSrc;
745 
746  TensorBlockIOSrc src(internal::strides<Layout>(Dimensions(data_dims)), data, desc.offset());
747  TensorBlockIODst dst(storage.dimensions(), storage.strides(), storage.data());
748 
749  TensorBlockIO::Copy(dst, src);
750  return storage.AsTensorMaterializedBlock();
751  }
752  }
int i
Definition: BiCGSTAB_step_by_step.cpp:9
SCALAR Scalar
Definition: bench_gemm.cpp:45
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType Copy(const Dst &dst, const Src &src, const DimensionsMap &dst_to_src_dim_map)
Definition: TensorBlock.h:1126
TensorMaterializedBlock(TensorBlockKind kind, const Scalar *data, const Dimensions &dimensions, bool valid_expr=true)
Definition: TensorBlock.h:609
static EIGEN_STRONG_INLINE Storage prepareStorage(TensorBlockDesc &desc, TensorBlockScratch &scratch, bool allow_strided_storage=false)
Definition: TensorBlock.h:671
DSizes< IndexType, NumDims > Dimensions
Definition: TensorBlock.h:606
@ ColMajor
Definition: Constants.h:318
static constexpr Index value
Definition: Meta.h:306

References Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::Storage::AsTensorMaterializedBlock(), Eigen::ColMajor, Eigen::internal::TensorBlockIO< Scalar, IndexType, NumDims, Layout >::Copy(), Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::data(), Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::Storage::data(), Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::dimension(), Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::dimensions(), Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::Storage::dimensions(), eigen_assert, i, Eigen::internal::kView, Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::offset(), Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::prepareStorage(), Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::Storage::strides(), and Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::TensorMaterializedBlock().

Referenced by Eigen::TensorEvaluator< Derived, Device >::block(), Eigen::TensorEvaluator< const Derived, Device >::block(), Eigen::TensorEvaluator< const TensorForcedEvalOp< ArgType_ >, Device >::block(), and Eigen::TensorEvaluator< const TensorReshapingOp< NewDimensions, ArgType >, Device >::block().

◆ prepareStorage()

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
template<typename TensorBlockScratch >
static EIGEN_STRONG_INLINE Storage Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::prepareStorage ( TensorBlockDesc &  desc,
TensorBlockScratch &  scratch,
bool  allow_strided_storage = false 
)
inlinestatic
672  {
673  // Try to reuse destination as an output block buffer.
674  typedef typename TensorBlockDesc::DestinationBuffer DestinationBuffer;
675 
676  if (desc.destination().kind() == DestinationBuffer::kContiguous) {
677  Scalar* buffer = desc.destination().template data<Scalar>();
678  desc.DropDestinationBuffer();
679  return Storage(buffer, desc.dimensions(), internal::strides<Layout>(desc.dimensions()),
680  /*materialized_in_output=*/true,
681  /*strided_storage=*/false);
682 
683  } else if (desc.destination().kind() == DestinationBuffer::kStrided && allow_strided_storage) {
684  Scalar* buffer = desc.destination().template data<Scalar>();
685  desc.DropDestinationBuffer();
686  return Storage(buffer, desc.dimensions(), desc.destination().strides(),
687  /*materialized_in_output=*/true, /*strided_storage=*/true);
688 
689  } else {
690  void* mem = scratch.allocate(desc.size() * sizeof(Scalar));
691  return Storage(static_cast<Scalar*>(mem), desc.dimensions(), internal::strides<Layout>(desc.dimensions()),
692  /*materialized_in_output=*/false,
693  /*strided_storage=*/false);
694  }
695  }

References Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::destination(), Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::dimensions(), Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::DropDestinationBuffer(), Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::DestinationBuffer::kind(), Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::size(), and Eigen::internal::TensorBlockDescriptor< NumDims, IndexType >::DestinationBuffer::strides().

Referenced by Eigen::TensorEvaluator< const TensorChippingOp< DimId, ArgType >, Device >::block(), Eigen::TensorEvaluator< const TensorShufflingOp< Shuffle, ArgType >, Device >::block(), Eigen::TensorEvaluator< const TensorBroadcastingOp< Broadcast, ArgType >, Device >::block(), Eigen::TensorEvaluator< const TensorGeneratorOp< Generator, ArgType >, Device >::block(), Eigen::TensorEvaluator< const TensorPaddingOp< PaddingDimensions, ArgType >, Device >::block(), Eigen::TensorEvaluator< const TensorReverseOp< ReverseDimensions, ArgType >, Device >::block(), Eigen::TensorEvaluator< const TensorRollOp< RollDimensions, ArgType >, Device >::block(), and Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::materialize().

Member Data Documentation

◆ m_data

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
const Scalar* Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::m_data
private

◆ m_dimensions

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
Dimensions Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::m_dimensions
private

◆ m_expr

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
XprType Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::m_expr
private

◆ m_kind

◆ m_valid_expr

template<typename Scalar , int NumDims, int Layout, typename IndexType = Eigen::Index>
bool Eigen::internal::TensorMaterializedBlock< Scalar, NumDims, Layout, IndexType >::m_valid_expr
private

The documentation for this class was generated from the following file: